fidl_fuchsia_sysmem2/fidl_fuchsia_sysmem2.rs
1// WARNING: This file is machine generated by fidlgen.
2
3#![warn(clippy::all)]
4#![allow(unused_parens, unused_mut, unused_imports, nonstandard_style)]
5
6use bitflags::bitflags;
7use fidl::client::QueryResponseFut;
8use fidl::encoding::{MessageBufFor, ProxyChannelBox, ResourceDialect};
9use fidl::endpoints::{ControlHandle as _, Responder as _};
10pub use fidl_fuchsia_sysmem2__common::*;
11use futures::future::{self, MaybeDone, TryFutureExt};
12use zx_status;
13
/// Request table for [`Allocator.AllocateNonSharedCollection`]; all fields are
/// optional at the wire level (FIDL table).
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorAllocateNonSharedCollectionRequest {
    /// The server end of the `BufferCollection` channel to be served by sysmem.
    pub collection_request: Option<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl: permits standalone encode/decode of this resource table in
// the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorAllocateNonSharedCollectionRequest
{
}
25
/// Request table for [`Allocator.AllocateSharedCollection`]; all fields are
/// optional at the wire level (FIDL table).
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorAllocateSharedCollectionRequest {
    /// The server end of the root `BufferCollectionToken` channel.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl: permits standalone encode/decode of this resource table in
// the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorAllocateSharedCollectionRequest
{
}
37
/// Request table for [`Allocator.BindSharedCollection`]; all fields are
/// optional at the wire level (FIDL table).
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorBindSharedCollectionRequest {
    /// The `BufferCollectionToken` client end being "turned in" in exchange for
    /// a `BufferCollection`.
    pub token: Option<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>,
    /// The server end of the `BufferCollection` channel; the sender keeps the
    /// client end as its connection to the logical buffer collection.
    pub buffer_collection_request: Option<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl: permits standalone encode/decode of this resource table in
// the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorBindSharedCollectionRequest
{
}
50
/// Request table for [`Allocator.GetVmoInfo`]; all fields are optional at the
/// wire level (FIDL table).
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorGetVmoInfoRequest {
    /// `vmo` is required to be set; ownership is transferred to the server
    /// so in most cases a client will duplicate a handle and transfer the
    /// duplicate via this field.
    pub vmo: Option<fidl::Vmo>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl: permits standalone encode/decode of this resource table in
// the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorGetVmoInfoRequest
{
}
65
/// Success-case response table for [`Allocator.GetVmoInfo`].
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorGetVmoInfoResponse {
    /// The buffer collection ID, unique per logical buffer collection per boot.
    pub buffer_collection_id: Option<u64>,
    /// Index of the buffer within its buffer collection (same as its index in
    /// `BufferCollectionInfo.buffers`).
    pub buffer_index: Option<u64>,
    /// Set iff the queried `vmo` is a weak sysmem VMO handle; signals
    /// `ZX_EVENTPAIR_PEER_CLOSED` when all weak VMO handles to the buffer
    /// should be closed as soon as possible.
    pub close_weak_asap: Option<fidl::EventPair>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl: permits standalone encode/decode of this resource table in
// the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorGetVmoInfoResponse
{
}
79
/// Request table for `BufferCollection.AttachLifetimeTracking`; all fields are
/// optional at the wire level (FIDL table).
// NOTE(review): field semantics are documented on the protocol method, which is
// outside this chunk — confirm against the fuchsia.sysmem2 FIDL definition.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionAttachLifetimeTrackingRequest {
    pub server_end: Option<fidl::EventPair>,
    pub buffers_remaining: Option<u32>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl: permits standalone encode/decode of this resource table in
// the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionAttachLifetimeTrackingRequest
{
}
92
/// Request table for `BufferCollection.AttachToken`; all fields are optional at
/// the wire level (FIDL table).
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionAttachTokenRequest {
    /// Rights to strip from the attached token's effective rights.
    // NOTE(review): exact attenuation semantics are documented on the protocol
    // method, which is outside this chunk — confirm against the FIDL definition.
    pub rights_attenuation_mask: Option<fidl::Rights>,
    /// Server end of the `BufferCollectionToken` channel to attach.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl: permits standalone encode/decode of this resource table in
// the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionAttachTokenRequest
{
}
105
/// Information about a buffer collection and its buffers.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionInfo {
    /// These settings apply to all the buffers in the initial buffer
    /// allocation.
    ///
    /// This field will always be set by sysmem.
    pub settings: Option<SingleBufferSettings>,
    /// VMO handles (and vmo_usable_start offset) for each buffer in the
    /// collection.
    ///
    /// The size of this vector is the buffer_count (buffer_count is not sent
    /// separately).
    ///
    /// All buffer VMO handles have identical size and access rights. The size
    /// is in settings.buffer_settings.size_bytes.
    ///
    /// The VMO access rights are determined based on the usages which the
    /// client specified when allocating the buffer collection. For example, a
    /// client which expressed a read-only usage will receive VMOs without write
    /// rights. In addition, the rights can be attenuated by the parameter to
    /// BufferCollectionToken.Duplicate() calls.
    ///
    /// This field will always have VmoBuffer(s) in it, even if the participant
    /// specifies usage which does not require VMO handles. This permits such a
    /// participant to know the vmo_usable_start values, in case that's of any
    /// use to the participant.
    ///
    /// This field will always be set by sysmem, even if the participant doesn't
    /// specify any buffer usage (but the [`fuchsia.sysmem2/VmoBuffer.vmo`]
    /// sub-field within this field won't be set in that case).
    pub buffers: Option<Vec<VmoBuffer>>,
    /// This number is unique among all logical buffer collections per boot.
    ///
    /// This ID number will be the same for all BufferCollectionToken(s),
    /// BufferCollection(s), and BufferCollectionTokenGroup(s) associated with
    /// the same logical buffer collection (derived from the same root token
    /// created with fuchsia.sysmem2.Allocator.CreateSharedCollection, or with
    /// CreateNonSharedCollection).
    ///
    /// The same ID can be retrieved from a BufferCollectionToken,
    /// BufferCollection, or BufferCollectionTokenGroup using
    /// GetBufferCollectionId (at the cost of a round-trip to sysmem and back).
    ///
    /// This field will always be set by sysmem.
    pub buffer_collection_id: Option<u64>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl: permits standalone encode/decode of this resource table in
// the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for BufferCollectionInfo {}
157
/// Request table for `BufferCollection.SetConstraints`; all fields are optional
/// at the wire level (FIDL table).
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionSetConstraintsRequest {
    /// This participant's constraints on the buffer collection.
    pub constraints: Option<BufferCollectionConstraints>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl: permits standalone encode/decode of this resource table in
// the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionSetConstraintsRequest
{
}
169
/// Request table for `BufferCollectionToken.CreateBufferCollectionTokenGroup`;
/// all fields are optional at the wire level (FIDL table).
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenCreateBufferCollectionTokenGroupRequest {
    /// Server end of the `BufferCollectionTokenGroup` channel to create.
    pub group_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl: permits standalone encode/decode of this resource table in
// the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
{
}
181
/// Request table for `BufferCollectionToken.Duplicate`; all fields are optional
/// at the wire level (FIDL table).
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenDuplicateRequest {
    /// Rights to strip from the duplicated token's effective rights (per the
    /// `BufferCollectionToken.Duplicate()` attenuation described on
    /// [`BufferCollectionInfo.buffers`]).
    pub rights_attenuation_mask: Option<fidl::Rights>,
    /// Server end of the new `BufferCollectionToken` channel.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl: permits standalone encode/decode of this resource table in
// the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenDuplicateRequest
{
}
194
/// Request table for `BufferCollectionTokenGroup.CreateChild`; fields are
/// optional at the wire level (FIDL table), but see per-field requirements.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenGroupCreateChildRequest {
    /// Must be set.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    /// If not set, the default is `ZX_RIGHT_SAME_RIGHTS`.
    pub rights_attenuation_mask: Option<fidl::Rights>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl: permits standalone encode/decode of this resource table in
// the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenGroupCreateChildRequest
{
}
209
/// Response table for `BufferCollectionTokenGroup.CreateChildrenSync`; all
/// fields are optional at the wire level (FIDL table).
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenGroupCreateChildrenSyncResponse {
    /// Client ends of the newly created child tokens.
    pub tokens: Option<Vec<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl: permits standalone encode/decode of this resource table in
// the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenGroupCreateChildrenSyncResponse
{
}
221
/// Response table for `BufferCollectionToken.DuplicateSync`; all fields are
/// optional at the wire level (FIDL table).
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenDuplicateSyncResponse {
    /// Client ends of the duplicated tokens.
    pub tokens: Option<Vec<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl: permits standalone encode/decode of this resource table in
// the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenDuplicateSyncResponse
{
}
233
/// Success-case response table for
/// `BufferCollection.WaitForAllBuffersAllocated`; all fields are optional at
/// the wire level (FIDL table).
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionWaitForAllBuffersAllocatedResponse {
    /// Settings and per-buffer VMO info for the allocated collection; see
    /// [`BufferCollectionInfo`].
    pub buffer_collection_info: Option<BufferCollectionInfo>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl: permits standalone encode/decode of this resource table in
// the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionWaitForAllBuffersAllocatedResponse
{
}
245
/// Request table for `Node.AttachNodeTracking`; fields are optional at the wire
/// level (FIDL table), but see per-field requirements.
#[derive(Debug, Default, PartialEq)]
pub struct NodeAttachNodeTrackingRequest {
    /// This field must be set. This eventpair end will be closed after the
    /// `Node` is closed or failed and the node's buffer counts are no
    /// longer in effect in the logical buffer collection.
    pub server_end: Option<fidl::EventPair>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl: permits standalone encode/decode of this resource table in
// the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for NodeAttachNodeTrackingRequest
{
}
260
/// Request table for `Node.IsAlternateFor`; all fields are optional at the wire
/// level (FIDL table).
#[derive(Debug, Default, PartialEq)]
pub struct NodeIsAlternateForRequest {
    /// Node reference event of the other node to compare against.
    // NOTE(review): obtained via `Node.GetNodeRef` (see `NodeGetNodeRefResponse`)
    // — confirm against the protocol definition, which is outside this chunk.
    pub node_ref: Option<fidl::Event>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl: permits standalone encode/decode of this resource table in
// the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for NodeIsAlternateForRequest {}
269
/// Request table for `Node.SetWeakOk`; all fields are optional at the wire
/// level (FIDL table).
#[derive(Debug, Default, PartialEq)]
pub struct NodeSetWeakOkRequest {
    /// Whether the weak-ok setting also applies to this node's child nodes.
    pub for_child_nodes_also: Option<bool>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl: permits standalone encode/decode of this resource table in
// the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for NodeSetWeakOkRequest {}
278
/// Response table for `Node.GetNodeRef`; all fields are optional at the wire
/// level (FIDL table).
#[derive(Debug, Default, PartialEq)]
pub struct NodeGetNodeRefResponse {
    /// Event handle serving as this node's reference; usable with
    /// [`NodeIsAlternateForRequest.node_ref`].
    pub node_ref: Option<fidl::Event>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl: permits standalone encode/decode of this resource table in
// the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for NodeGetNodeRefResponse {}
287
/// A single buffer within a [`BufferCollectionInfo`]: the VMO handle (when
/// provided), the usable start offset, and the weak-VMO lifetime signal.
#[derive(Debug, Default, PartialEq)]
pub struct VmoBuffer {
    /// `vmo` can be un-set if a participant has only
    /// [`fuchsia.sysmem2/BufferUsage.none`] set to `NONE_USAGE` (explicitly or
    /// implicitly by [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
    /// without `constraints` set).
    pub vmo: Option<fidl::Vmo>,
    /// Offset within the VMO of the first usable byte. Must be < the VMO's size
    /// in bytes, and leave sufficient room for BufferMemorySettings.size_bytes
    /// before the end of the VMO.
    ///
    /// Currently sysmem will always set this field to 0, and in future, sysmem
    /// won't set this field to a non-zero value unless all participants have
    /// explicitly indicated support for non-zero vmo_usable_start (this
    /// mechanism does not exist as of this comment). A participant that hasn't
    /// explicitly indicated support for non-zero vmo_usable_start (all current
    /// clients) should implicitly assume this field is set to 0 without
    /// actually checking this field.
    pub vmo_usable_start: Option<u64>,
    /// This field is set iff `vmo` is a sysmem weak VMO handle. The client must
    /// keep `close_weak_asap` around for as long as `vmo`, and must notice
    /// `ZX_EVENTPAIR_PEER_CLOSED`. If that signal occurs, the client must close
    /// `vmo` asap. Not doing so is considered a VMO leak by the client and in
    /// that case sysmem will eventually complain loudly via syslog (currently
    /// 5s later).
    pub close_weak_asap: Option<fidl::EventPair>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl: permits standalone encode/decode of this resource table in
// the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for VmoBuffer {}
319
/// Zero-sized marker type identifying the `fuchsia.sysmem2/Allocator` protocol
/// and tying together its proxy, request-stream, and synchronous-proxy types.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct AllocatorMarker;

impl fidl::endpoints::ProtocolMarker for AllocatorMarker {
    type Proxy = AllocatorProxy;
    type RequestStream = AllocatorRequestStream;
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = AllocatorSynchronousProxy;

    // Fully-qualified protocol name, used for discovery and error reporting.
    const DEBUG_NAME: &'static str = "fuchsia.sysmem2.Allocator";
}
// The protocol can be connected to by its `DEBUG_NAME` via capability routing.
impl fidl::endpoints::DiscoverableProtocolMarker for AllocatorMarker {}
/// Result type of [`Allocator.GetVmoInfo`]: success table or a sysmem `Error`.
pub type AllocatorGetVmoInfoResult = Result<AllocatorGetVmoInfoResponse, Error>;
333
/// Client-side interface of the `fuchsia.sysmem2/Allocator` protocol.
///
/// Methods on [`AllocatorProxy`] delegate to this trait; two-way methods are
/// represented by associated future types so implementations can choose their
/// own future representation.
pub trait AllocatorProxyInterface: Send + Sync {
    fn r#allocate_non_shared_collection(
        &self,
        payload: AllocatorAllocateNonSharedCollectionRequest,
    ) -> Result<(), fidl::Error>;
    fn r#allocate_shared_collection(
        &self,
        payload: AllocatorAllocateSharedCollectionRequest,
    ) -> Result<(), fidl::Error>;
    fn r#bind_shared_collection(
        &self,
        payload: AllocatorBindSharedCollectionRequest,
    ) -> Result<(), fidl::Error>;
    // Future resolved by the two-way ValidateBufferCollectionToken call.
    type ValidateBufferCollectionTokenResponseFut: std::future::Future<
            Output = Result<AllocatorValidateBufferCollectionTokenResponse, fidl::Error>,
        > + Send;
    fn r#validate_buffer_collection_token(
        &self,
        payload: &AllocatorValidateBufferCollectionTokenRequest,
    ) -> Self::ValidateBufferCollectionTokenResponseFut;
    fn r#set_debug_client_info(
        &self,
        payload: &AllocatorSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    // Future resolved by the two-way GetVmoInfo call.
    type GetVmoInfoResponseFut: std::future::Future<Output = Result<AllocatorGetVmoInfoResult, fidl::Error>>
        + Send;
    fn r#get_vmo_info(&self, payload: AllocatorGetVmoInfoRequest) -> Self::GetVmoInfoResponseFut;
}
/// Synchronous (blocking) client for `fuchsia.sysmem2/Allocator`.
/// Only available when compiling for a Fuchsia target.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct AllocatorSynchronousProxy {
    // Underlying synchronous FIDL client owning the channel.
    client: fidl::client::sync::Client,
}

#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for AllocatorSynchronousProxy {
    type Proxy = AllocatorProxy;
    type Protocol = AllocatorMarker;

    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
385
// Blocking method implementations for the `fuchsia.sysmem2/Allocator` client.
// The hex constant passed to each `send`/`send_query` is the FIDL method
// ordinal identifying the call on the wire; all calls use FLEXIBLE dynamic
// flags.
#[cfg(target_os = "fuchsia")]
impl AllocatorSynchronousProxy {
    /// Creates a synchronous proxy from a raw channel, tagging it with the
    /// protocol's `DEBUG_NAME` for error reporting.
    pub fn new(channel: fidl::Channel) -> Self {
        let protocol_name = <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
    }

    /// Consumes the proxy, returning the underlying channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<AllocatorEvent, fidl::Error> {
        AllocatorEvent::decode(self.client.wait_for_event(deadline)?)
    }

    /// Allocates a buffer collection on behalf of a single client (aka
    /// initiator) who is also the only participant (from the point of view of
    /// sysmem).
    ///
    /// This call exists mainly for temp/testing purposes. This call skips the
    /// [`fuchsia.sysmem2/BufferCollectionToken`] stage, so there's no way to
    /// allow another participant to specify its constraints.
    ///
    /// Real clients are encouraged to use
    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] instead, and to
    /// let relevant participants directly convey their own constraints to
    /// sysmem by sending `BufferCollectionToken`s to those participants.
    ///
    /// + request `collection_request` The server end of the
    ///   [`fuchsia.sysmem2/BufferCollection`].
    pub fn r#allocate_non_shared_collection(
        &self,
        mut payload: AllocatorAllocateNonSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorAllocateNonSharedCollectionRequest>(
            &mut payload,
            0x5ca681f025a80e44,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Creates a root [`fuchsia.sysmem2/BufferCollectionToken`].
    ///
    /// The `BufferCollectionToken` can be "duplicated" for distribution to
    /// participants by using
    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. Each
    /// `BufferCollectionToken` can be converted into a
    /// [`fuchsia.sysmem2.BufferCollection`] using
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
    ///
    /// Buffer constraints can be set via
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// Success/failure to populate the buffer collection with buffers can be
    /// determined from
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// Closing the client end of a `BufferCollectionToken` or
    /// `BufferCollection` (without `Release` first) will fail all client ends
    /// in the same failure domain, which by default is all client ends of the
    /// buffer collection. See
    /// [`fuchsia.sysmem2/BufferCollection.SetDispensable`] and
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] for ways to create
    /// separate failure domains within a buffer collection.
    pub fn r#allocate_shared_collection(
        &self,
        mut payload: AllocatorAllocateSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorAllocateSharedCollectionRequest>(
            &mut payload,
            0x11a19ff51f0b49c1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Convert a [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// At the time of sending this message, the buffer collection hasn't yet
    /// been populated with buffers - the participant must first also send
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] via the
    /// `BufferCollection` client end.
    ///
    /// All `BufferCollectionToken`(s) duplicated from a root
    /// `BufferCollectionToken` (created via `AllocateSharedCollection`) must be
    /// "turned in" via `BindSharedCollection` (or `Release`ed), and all
    /// existing `BufferCollection` client ends must have sent `SetConstraints`
    /// before the logical BufferCollection will be populated with buffers (or
    /// will fail if the overall set of constraints can't be satisfied).
    ///
    /// + request `token` The client endpoint of a channel whose server end was
    ///   sent to sysmem using
    ///   [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] or whose server
    ///   end was sent to sysmem using
    ///   [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. The token is
    ///   being "turned in" in exchange for a
    ///   [`fuchsia.sysmem2/BufferCollection`].
    /// + request `buffer_collection_request` The server end of a
    ///   [`fuchsia.sysmem2/BufferCollection`] channel. The sender retains the
    ///   client end. The `BufferCollection` channel is a single participant's
    ///   connection to the logical buffer collection. Typically there will be
    ///   other participants with their own `BufferCollection` channel to the
    ///   logical buffer collection.
    pub fn r#bind_shared_collection(
        &self,
        mut payload: AllocatorBindSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorBindSharedCollectionRequest>(
            &mut payload,
            0x550916b0dc1d5b4e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Checks whether a [`fuchsia.sysmem2/BufferCollectionToken`] is known to
    /// the sysmem server.
    ///
    /// With this call, the client can determine whether an incoming token is a
    /// real sysmem token that is known to the sysmem server, without any risk
    /// of getting stuck waiting forever on a potentially fake token to complete
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] (or any other two-way
    /// FIDL message). In cases where the client trusts the source of the token
    /// to provide a real token, this call is not typically needed outside of
    /// debugging.
    ///
    /// If the validate fails sometimes but succeeds other times, the source of
    /// the token may itself not be calling
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] or
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after creating/duplicating the
    /// token but before sending the token to the current client. It may be more
    /// convenient for the source to use
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] to duplicate
    /// token(s), since that call has the sync step built in. Or, the buffer
    /// collection may be failing before this call is processed by the sysmem
    /// server, as buffer collection failure cleans up sysmem's tracking of
    /// associated tokens.
    ///
    /// This call has no effect on any token.
    ///
    /// + request `token_server_koid` The koid of the server end of a channel
    ///   that might be a BufferCollectionToken channel. This can be obtained
    ///   via `zx_object_get_info` `ZX_INFO_HANDLE_BASIC` `related_koid`.
    /// - response `is_known` true means sysmem knew of the token at the time
    ///   sysmem processed the request, but doesn't guarantee that the token is
    ///   still valid by the time the client receives the reply. What it does
    ///   guarantee is that the token at least was a real token, so a two-way
    ///   call to the token won't stall forever (will fail or succeed fairly
    ///   quickly, not stall). This can already be known implicitly if the
    ///   source of the token can be trusted to provide a real token. A false
    ///   value means the token wasn't known to sysmem at the time sysmem
    ///   processed this call, but the token may have previously been valid, or
    ///   may yet become valid. Or if the sender of the token isn't trusted to
    ///   provide a real token, the token may be fake. It's the responsibility
    ///   of the sender to sync with sysmem to ensure that previously
    ///   created/duplicated token(s) are known to sysmem, before sending the
    ///   token(s) to other participants.
    pub fn r#validate_buffer_collection_token(
        &self,
        mut payload: &AllocatorValidateBufferCollectionTokenRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<AllocatorValidateBufferCollectionTokenResponse, fidl::Error> {
        let _response = self.client.send_query::<
            AllocatorValidateBufferCollectionTokenRequest,
            fidl::encoding::FlexibleType<AllocatorValidateBufferCollectionTokenResponse>,
        >(
            payload,
            0x4c5ee91b02a7e68d,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<AllocatorMarker>("validate_buffer_collection_token")?;
        Ok(_response)
    }

    /// Set information about the current client that can be used by sysmem to
    /// help diagnose leaking memory and allocation stalls waiting for a
    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// This sets the debug client info on all [`fuchsia.sysmem2/Node`](s)
    /// subsequently created by this [`fuchsia.sysmem2/Allocator`]
    /// including any [`fuchsia.sysmem2/BufferCollection`](s) created via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] (in the absence of
    /// any prior call to [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`],
    /// these `BufferCollection`(s) have the same initial debug client info as
    /// the token turned in to create the `BufferCollection`).
    ///
    /// This info can be subsequently overridden on a per-`Node` basis by
    /// sending [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
    ///
    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
    /// `Allocator` is the most efficient way to ensure that all
    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
    /// set, and is also more efficient than separately sending the same debug
    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
    /// created [`fuchsia.sysmem2/Node`].
    ///
    /// + request `name` This can be an arbitrary string, but the current
    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
    /// + request `id` This can be an arbitrary id, but the current process ID
    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &AllocatorSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorSetDebugClientInfoRequest>(
            payload,
            0x6f68f19a3f509c4d,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Given a handle to a sysmem-provided VMO, this returns additional info
    /// about the corresponding sysmem logical buffer.
    ///
    /// Most callers will duplicate a VMO handle first and send the duplicate to
    /// this call.
    ///
    /// If the client has created a child VMO of a sysmem-provided VMO, that
    /// child VMO isn't considered a "sysmem VMO" for purposes of this call.
    ///
    /// + request `vmo` A handle to a sysmem-provided VMO (or see errors).
    /// - response `buffer_collection_id` The buffer collection ID, which is
    ///   unique per logical buffer collection per boot.
    /// - response `buffer_index` The buffer index of the buffer within the
    ///   buffer collection. This is the same as the index of the buffer within
    ///   [`fuchsia.sysmem2/BufferCollectionInfo.buffers`]. The `buffer_index`
    ///   is the same for all sysmem-delivered VMOs corresponding to the same
    ///   logical buffer, even if the VMO koids differ. The `buffer_index` is
    ///   only unique across buffers of a buffer collection. For a given buffer,
    ///   the combination of `buffer_collection_id` and `buffer_index` is unique
    ///   per boot.
    /// - response `close_weak_asap` Iff `vmo` is a handle to a weak sysmem VMO,
    ///   the `close_weak_asap` field will be set in the response. This handle
    ///   will signal `ZX_EVENTPAIR_PEER_CLOSED` when all weak VMO handles to
    ///   the buffer should be closed as soon as possible. This is signalled
    ///   shortly after all strong sysmem VMOs to the buffer are closed
    ///   (including any held indirectly via strong `BufferCollectionToken` or
    ///   strong `BufferCollection`). Failure to close all weak sysmem VMO
    ///   handles to the buffer quickly upon `ZX_EVENTPAIR_PEER_CLOSED` is
    ///   considered a VMO leak caused by the client still holding a weak sysmem
    ///   VMO handle and results in loud complaints to the log by sysmem. The
    ///   buffers of a collection can be freed independently of each other. The
    ///   `ZX_EVENTPAIR_PEER_CLOSED` may already be signalled before the
    ///   response arrives at the client. A client that isn't prepared to handle
    ///   weak sysmem VMOs, on seeing this field set, can close all handles to
    ///   the buffer and fail any associated request.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` - the vmo isn't a sysmem
    ///   VMO. Both strong and weak sysmem VMOs can be passed to this call, and
    ///   the VMO handle passed in to this call itself keeps the VMO's info
    ///   alive for purposes of responding to this call. Because of this,
    ///   ZX_ERR_NOT_FOUND errors are unambiguous (even if there are no other
    ///   handles to the VMO when calling; even if other handles are closed
    ///   before the GetVmoInfo response arrives at the client).
    /// * error `[fuchsia.sysmem2/Error.HANDLE_ACCESS_DENIED]` The vmo isn't
    ///   capable of being used with GetVmoInfo due to rights/capability
    ///   attenuation. The VMO needs to be usable with [`zx_vmo_get_info`] with
    ///   topic [`ZX_INFO_HANDLE_BASIC`].
    /// * error `[fuchsia.sysmem2/Error.UNSPECIFIED]` The request failed for an
    ///   unspecified reason. See the log for more info.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The vmo field
    ///   wasn't set, or there was some other problem with the request field(s).
    pub fn r#get_vmo_info(
        &self,
        mut payload: AllocatorGetVmoInfoRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<AllocatorGetVmoInfoResult, fidl::Error> {
        let _response = self.client.send_query::<
            AllocatorGetVmoInfoRequest,
            fidl::encoding::FlexibleResultType<AllocatorGetVmoInfoResponse, Error>,
        >(
            &mut payload,
            0x21a881120aa0ddf9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<AllocatorMarker>("get_vmo_info")?;
        Ok(_response.map(|x| x))
    }
}
671
// Conversions between the synchronous proxy and raw channel/handle types.

// Consuming the proxy yields its channel as a generic nullable handle.
#[cfg(target_os = "fuchsia")]
impl From<AllocatorSynchronousProxy> for zx::NullableHandle {
    fn from(value: AllocatorSynchronousProxy) -> Self {
        value.into_channel().into()
    }
}

// A raw channel can be wrapped directly into a synchronous proxy.
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for AllocatorSynchronousProxy {
    fn from(value: fidl::Channel) -> Self {
        Self::new(value)
    }
}

// A typed `ClientEnd<AllocatorMarker>` can also be converted into the proxy.
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for AllocatorSynchronousProxy {
    type Protocol = AllocatorMarker;

    fn from_client(value: fidl::endpoints::ClientEnd<AllocatorMarker>) -> Self {
        Self::new(value.into_channel())
    }
}
694
/// Asynchronous client for the `fuchsia.sysmem2/Allocator` protocol.
/// Cloning the proxy shares the same underlying channel.
#[derive(Debug, Clone)]
pub struct AllocatorProxy {
    // Underlying asynchronous FIDL client owning the channel.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

impl fidl::endpoints::Proxy for AllocatorProxy {
    type Protocol = AllocatorMarker;

    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
        // On failure, the client is handed back and the proxy is reconstructed
        // so the caller keeps a usable proxy.
        self.client.into_channel().map_err(|client| Self { client })
    }

    fn as_channel(&self) -> &::fidl::AsyncChannel {
        self.client.as_channel()
    }
}
715
impl AllocatorProxy {
    /// Create a new Proxy for fuchsia.sysmem2/Allocator.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        let protocol_name = <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }

    /// Get a Stream of events from the remote end of the protocol.
    ///
    /// # Panics
    ///
    /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> AllocatorEventStream {
        AllocatorEventStream { event_receiver: self.client.take_event_receiver() }
    }

    /// Allocates a buffer collection on behalf of a single client (aka
    /// initiator) who is also the only participant (from the point of view of
    /// sysmem).
    ///
    /// This call exists mainly for temp/testing purposes. This call skips the
    /// [`fuchsia.sysmem2/BufferCollectionToken`] stage, so there's no way to
    /// allow another participant to specify its constraints.
    ///
    /// Real clients are encouraged to use
    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] instead, and to
    /// let relevant participants directly convey their own constraints to
    /// sysmem by sending `BufferCollectionToken`s to those participants.
    ///
    /// + request `collection_request` The server end of the
    ///   [`fuchsia.sysmem2/BufferCollection`].
    pub fn r#allocate_non_shared_collection(
        &self,
        mut payload: AllocatorAllocateNonSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        AllocatorProxyInterface::r#allocate_non_shared_collection(self, payload)
    }

    /// Creates a root [`fuchsia.sysmem2/BufferCollectionToken`].
    ///
    /// The `BufferCollectionToken` can be "duplicated" for distribution to
    /// participants by using
    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. Each
    /// `BufferCollectionToken` can be converted into a
    /// [`fuchsia.sysmem2/BufferCollection`] using
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
    ///
    /// Buffer constraints can be set via
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// Success/failure to populate the buffer collection with buffers can be
    /// determined from
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// Closing the client end of a `BufferCollectionToken` or
    /// `BufferCollection` (without `Release` first) will fail all client ends
    /// in the same failure domain, which by default is all client ends of the
    /// buffer collection. See
    /// [`fuchsia.sysmem2/BufferCollection.SetDispensable`] and
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] for ways to create
    /// separate failure domains within a buffer collection.
    pub fn r#allocate_shared_collection(
        &self,
        mut payload: AllocatorAllocateSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        AllocatorProxyInterface::r#allocate_shared_collection(self, payload)
    }

    /// Convert a [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// At the time of sending this message, the buffer collection hasn't yet
    /// been populated with buffers - the participant must first also send
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] via the
    /// `BufferCollection` client end.
    ///
    /// All `BufferCollectionToken`(s) duplicated from a root
    /// `BufferCollectionToken` (created via `AllocateSharedCollection`) must be
    /// "turned in" via `BindSharedCollection` (or `Release`ed), and all
    /// existing `BufferCollection` client ends must have sent `SetConstraints`
    /// before the logical BufferCollection will be populated with buffers (or
    /// will fail if the overall set of constraints can't be satisfied).
    ///
    /// + request `token` The client endpoint of a channel whose server end was
    ///   sent to sysmem using
    ///   [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] or whose server
    ///   end was sent to sysmem using
    ///   [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. The token is
    ///   being "turned in" in exchange for a
    ///   [`fuchsia.sysmem2/BufferCollection`].
    /// + request `buffer_collection_request` The server end of a
    ///   [`fuchsia.sysmem2/BufferCollection`] channel. The sender retains the
    ///   client end. The `BufferCollection` channel is a single participant's
    ///   connection to the logical buffer collection. Typically there will be
    ///   other participants with their own `BufferCollection` channel to the
    ///   logical buffer collection.
    pub fn r#bind_shared_collection(
        &self,
        mut payload: AllocatorBindSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        AllocatorProxyInterface::r#bind_shared_collection(self, payload)
    }

    /// Checks whether a [`fuchsia.sysmem2/BufferCollectionToken`] is known to
    /// the sysmem server.
    ///
    /// With this call, the client can determine whether an incoming token is a
    /// real sysmem token that is known to the sysmem server, without any risk
    /// of getting stuck waiting forever on a potentially fake token to complete
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] (or any other two-way
    /// FIDL message). In cases where the client trusts the source of the token
    /// to provide a real token, this call is not typically needed outside of
    /// debugging.
    ///
    /// If the validate fails sometimes but succeeds other times, the source of
    /// the token may itself not be calling
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] or
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after creating/duplicating the
    /// token but before sending the token to the current client. It may be more
    /// convenient for the source to use
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] to duplicate
    /// token(s), since that call has the sync step built in. Or, the buffer
    /// collection may be failing before this call is processed by the sysmem
    /// server, as buffer collection failure cleans up sysmem's tracking of
    /// associated tokens.
    ///
    /// This call has no effect on any token.
    ///
    /// + request `token_server_koid` The koid of the server end of a channel
    ///   that might be a BufferCollectionToken channel. This can be obtained
    ///   via `zx_object_get_info` `ZX_INFO_HANDLE_BASIC` `related_koid`.
    /// - response `is_known` true means sysmem knew of the token at the time
    ///   sysmem processed the request, but doesn't guarantee that the token is
    ///   still valid by the time the client receives the reply. What it does
    ///   guarantee is that the token at least was a real token, so a two-way
    ///   call to the token won't stall forever (will fail or succeed fairly
    ///   quickly, not stall). This can already be known implicitly if the
    ///   source of the token can be trusted to provide a real token. A false
    ///   value means the token wasn't known to sysmem at the time sysmem
    ///   processed this call, but the token may have previously been valid, or
    ///   may yet become valid. Or if the sender of the token isn't trusted to
    ///   provide a real token, the token may be fake. It's the responsibility
    ///   of the sender to sync with sysmem to ensure that previously
    ///   created/duplicated token(s) are known to sysmem, before sending the
    ///   token(s) to other participants.
    pub fn r#validate_buffer_collection_token(
        &self,
        mut payload: &AllocatorValidateBufferCollectionTokenRequest,
    ) -> fidl::client::QueryResponseFut<
        AllocatorValidateBufferCollectionTokenResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        AllocatorProxyInterface::r#validate_buffer_collection_token(self, payload)
    }

    /// Set information about the current client that can be used by sysmem to
    /// help diagnose leaking memory and allocation stalls waiting for a
    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// This sets the debug client info on all [`fuchsia.sysmem2/Node`](s)
    /// subsequently created by this [`fuchsia.sysmem2/Allocator`]
    /// including any [`fuchsia.sysmem2/BufferCollection`](s) created via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] (in the absence of
    /// any prior call to [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`],
    /// these `BufferCollection`(s) have the same initial debug client info as
    /// the token turned in to create the `BufferCollection`).
    ///
    /// This info can be subsequently overridden on a per-`Node` basis by
    /// sending [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
    ///
    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
    /// `Allocator` is the most efficient way to ensure that all
    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
    /// set, and is also more efficient than separately sending the same debug
    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
    /// created [`fuchsia.sysmem2/Node`].
    ///
    /// + request `name` This can be an arbitrary string, but the current
    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
    /// + request `id` This can be an arbitrary id, but the current process ID
    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &AllocatorSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        AllocatorProxyInterface::r#set_debug_client_info(self, payload)
    }

    /// Given a handle to a sysmem-provided VMO, this returns additional info
    /// about the corresponding sysmem logical buffer.
    ///
    /// Most callers will duplicate a VMO handle first and send the duplicate to
    /// this call.
    ///
    /// If the client has created a child VMO of a sysmem-provided VMO, that
    /// child VMO isn't considered a "sysmem VMO" for purposes of this call.
    ///
    /// + request `vmo` A handle to a sysmem-provided VMO (or see errors).
    /// - response `buffer_collection_id` The buffer collection ID, which is
    ///   unique per logical buffer collection per boot.
    /// - response `buffer_index` The buffer index of the buffer within the
    ///   buffer collection. This is the same as the index of the buffer within
    ///   [`fuchsia.sysmem2/BufferCollectionInfo.buffers`]. The `buffer_index`
    ///   is the same for all sysmem-delivered VMOs corresponding to the same
    ///   logical buffer, even if the VMO koids differ. The `buffer_index` is
    ///   only unique across buffers of a buffer collection. For a given buffer,
    ///   the combination of `buffer_collection_id` and `buffer_index` is unique
    ///   per boot.
    /// - response `close_weak_asap` Iff `vmo` is a handle to a weak sysmem VMO,
    ///   the `close_weak_asap` field will be set in the response. This handle
    ///   will signal `ZX_EVENTPAIR_PEER_CLOSED` when all weak VMO handles to
    ///   the buffer should be closed as soon as possible. This is signalled
    ///   shortly after all strong sysmem VMOs to the buffer are closed
    ///   (including any held indirectly via strong `BufferCollectionToken` or
    ///   strong `BufferCollection`). Failure to close all weak sysmem VMO
    ///   handles to the buffer quickly upon `ZX_EVENTPAIR_PEER_CLOSED` is
    ///   considered a VMO leak caused by the client still holding a weak sysmem
    ///   VMO handle and results in loud complaints to the log by sysmem. The
    ///   buffers of a collection can be freed independently of each other. The
    ///   `ZX_EVENTPAIR_PEER_CLOSED` may already be signalled before the
    ///   response arrives at the client. A client that isn't prepared to handle
    ///   weak sysmem VMOs, on seeing this field set, can close all handles to
    ///   the buffer and fail any associated request.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` - the vmo isn't a sysmem
    ///   VMO. Both strong and weak sysmem VMOs can be passed to this call, and
    ///   the VMO handle passed in to this call itself keeps the VMO's info
    ///   alive for purposes of responding to this call. Because of this,
    ///   ZX_ERR_NOT_FOUND errors are unambiguous (even if there are no other
    ///   handles to the VMO when calling; even if other handles are closed
    ///   before the GetVmoInfo response arrives at the client).
    /// * error `[fuchsia.sysmem2/Error.HANDLE_ACCESS_DENIED]` The vmo isn't
    ///   capable of being used with GetVmoInfo due to rights/capability
    ///   attenuation. The VMO needs to be usable with [`zx_vmo_get_info`] with
    ///   topic [`ZX_INFO_HANDLE_BASIC`].
    /// * error `[fuchsia.sysmem2/Error.UNSPECIFIED]` The request failed for an
    ///   unspecified reason. See the log for more info.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The vmo field
    ///   wasn't set, or there was some other problem with the request field(s).
    pub fn r#get_vmo_info(
        &self,
        mut payload: AllocatorGetVmoInfoRequest,
    ) -> fidl::client::QueryResponseFut<
        AllocatorGetVmoInfoResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        AllocatorProxyInterface::r#get_vmo_info(self, payload)
    }
}
965
966impl AllocatorProxyInterface for AllocatorProxy {
967 fn r#allocate_non_shared_collection(
968 &self,
969 mut payload: AllocatorAllocateNonSharedCollectionRequest,
970 ) -> Result<(), fidl::Error> {
971 self.client.send::<AllocatorAllocateNonSharedCollectionRequest>(
972 &mut payload,
973 0x5ca681f025a80e44,
974 fidl::encoding::DynamicFlags::FLEXIBLE,
975 )
976 }
977
978 fn r#allocate_shared_collection(
979 &self,
980 mut payload: AllocatorAllocateSharedCollectionRequest,
981 ) -> Result<(), fidl::Error> {
982 self.client.send::<AllocatorAllocateSharedCollectionRequest>(
983 &mut payload,
984 0x11a19ff51f0b49c1,
985 fidl::encoding::DynamicFlags::FLEXIBLE,
986 )
987 }
988
989 fn r#bind_shared_collection(
990 &self,
991 mut payload: AllocatorBindSharedCollectionRequest,
992 ) -> Result<(), fidl::Error> {
993 self.client.send::<AllocatorBindSharedCollectionRequest>(
994 &mut payload,
995 0x550916b0dc1d5b4e,
996 fidl::encoding::DynamicFlags::FLEXIBLE,
997 )
998 }
999
1000 type ValidateBufferCollectionTokenResponseFut = fidl::client::QueryResponseFut<
1001 AllocatorValidateBufferCollectionTokenResponse,
1002 fidl::encoding::DefaultFuchsiaResourceDialect,
1003 >;
1004 fn r#validate_buffer_collection_token(
1005 &self,
1006 mut payload: &AllocatorValidateBufferCollectionTokenRequest,
1007 ) -> Self::ValidateBufferCollectionTokenResponseFut {
1008 fn _decode(
1009 mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
1010 ) -> Result<AllocatorValidateBufferCollectionTokenResponse, fidl::Error> {
1011 let _response = fidl::client::decode_transaction_body::<
1012 fidl::encoding::FlexibleType<AllocatorValidateBufferCollectionTokenResponse>,
1013 fidl::encoding::DefaultFuchsiaResourceDialect,
1014 0x4c5ee91b02a7e68d,
1015 >(_buf?)?
1016 .into_result::<AllocatorMarker>("validate_buffer_collection_token")?;
1017 Ok(_response)
1018 }
1019 self.client.send_query_and_decode::<
1020 AllocatorValidateBufferCollectionTokenRequest,
1021 AllocatorValidateBufferCollectionTokenResponse,
1022 >(
1023 payload,
1024 0x4c5ee91b02a7e68d,
1025 fidl::encoding::DynamicFlags::FLEXIBLE,
1026 _decode,
1027 )
1028 }
1029
1030 fn r#set_debug_client_info(
1031 &self,
1032 mut payload: &AllocatorSetDebugClientInfoRequest,
1033 ) -> Result<(), fidl::Error> {
1034 self.client.send::<AllocatorSetDebugClientInfoRequest>(
1035 payload,
1036 0x6f68f19a3f509c4d,
1037 fidl::encoding::DynamicFlags::FLEXIBLE,
1038 )
1039 }
1040
1041 type GetVmoInfoResponseFut = fidl::client::QueryResponseFut<
1042 AllocatorGetVmoInfoResult,
1043 fidl::encoding::DefaultFuchsiaResourceDialect,
1044 >;
1045 fn r#get_vmo_info(
1046 &self,
1047 mut payload: AllocatorGetVmoInfoRequest,
1048 ) -> Self::GetVmoInfoResponseFut {
1049 fn _decode(
1050 mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
1051 ) -> Result<AllocatorGetVmoInfoResult, fidl::Error> {
1052 let _response = fidl::client::decode_transaction_body::<
1053 fidl::encoding::FlexibleResultType<AllocatorGetVmoInfoResponse, Error>,
1054 fidl::encoding::DefaultFuchsiaResourceDialect,
1055 0x21a881120aa0ddf9,
1056 >(_buf?)?
1057 .into_result::<AllocatorMarker>("get_vmo_info")?;
1058 Ok(_response.map(|x| x))
1059 }
1060 self.client.send_query_and_decode::<AllocatorGetVmoInfoRequest, AllocatorGetVmoInfoResult>(
1061 &mut payload,
1062 0x21a881120aa0ddf9,
1063 fidl::encoding::DynamicFlags::FLEXIBLE,
1064 _decode,
1065 )
1066 }
1067}
1068
/// A Stream of events arriving on a fuchsia.sysmem2/Allocator client channel.
///
/// Obtained from [`AllocatorProxy::take_event_stream`]; wraps the client's
/// event receiver and decodes each raw message buffer into an
/// [`AllocatorEvent`].
pub struct AllocatorEventStream {
    // Raw event-message receiver taken from the underlying FIDL client.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
1072
// The stream holds no self-referential data, so it is safe to mark `Unpin`;
// this lets callers poll it without pin projection.
impl std::marker::Unpin for AllocatorEventStream {}
1074
impl futures::stream::FusedStream for AllocatorEventStream {
    /// Delegates to the underlying event receiver: once it reports
    /// termination, this stream will never yield another item.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
1080
1081impl futures::Stream for AllocatorEventStream {
1082 type Item = Result<AllocatorEvent, fidl::Error>;
1083
1084 fn poll_next(
1085 mut self: std::pin::Pin<&mut Self>,
1086 cx: &mut std::task::Context<'_>,
1087 ) -> std::task::Poll<Option<Self::Item>> {
1088 match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
1089 &mut self.event_receiver,
1090 cx
1091 )?) {
1092 Some(buf) => std::task::Poll::Ready(Some(AllocatorEvent::decode(buf))),
1093 None => std::task::Poll::Ready(None),
1094 }
1095 }
1096}
1097
/// Events delivered by the fuchsia.sysmem2/Allocator protocol.
///
/// This protocol declares no known events, so the only variant is the
/// catch-all for flexible events from a newer peer.
#[derive(Debug)]
pub enum AllocatorEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
1106
impl AllocatorEvent {
    /// Decodes a message buffer as a [`AllocatorEvent`].
    ///
    /// Events carry tx_id 0 (they are unsolicited, not replies); flexible
    /// messages with an unrecognized ordinal become `_UnknownEvent`, while
    /// strict unknown ordinals are a protocol error.
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<AllocatorEvent, fidl::Error> {
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        // Events never have a transaction id.
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            // Flexible interaction: tolerate events from a newer protocol
            // revision by reporting just the ordinal.
            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                Ok(AllocatorEvent::_UnknownEvent { ordinal: tx_header.ordinal })
            }
            // Strict unknown ordinal: reject as a decoding error.
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name: <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            }),
        }
    }
}
1126
/// A Stream of incoming requests for fuchsia.sysmem2/Allocator.
pub struct AllocatorRequestStream {
    // Shared server-side channel state (also cloned into each control handle).
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the channel closes or shuts down; polling after this panics.
    is_terminated: bool,
}
1132
// No self-referential data; safe to poll without pinning guarantees.
impl std::marker::Unpin for AllocatorRequestStream {}
1134
impl futures::stream::FusedStream for AllocatorRequestStream {
    /// Reports the terminated flag maintained by `poll_next` (set on channel
    /// shutdown or peer close).
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
1140
impl fidl::endpoints::RequestStream for AllocatorRequestStream {
    type Protocol = AllocatorMarker;
    type ControlHandle = AllocatorControlHandle;

    /// Wraps a server-end channel in a fresh, non-terminated request stream.
    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    /// Creates a control handle sharing this stream's channel state.
    fn control_handle(&self) -> Self::ControlHandle {
        AllocatorControlHandle { inner: self.inner.clone() }
    }

    /// Decomposes the stream into its shared state and terminated flag.
    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    /// Reassembles a stream from parts previously returned by `into_inner`.
    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
1167
impl futures::Stream for AllocatorRequestStream {
    type Item = Result<AllocatorRequest, fidl::Error>;

    /// Reads the next message from the server channel, decodes it by method
    /// ordinal, and yields it as an [`AllocatorRequest`]. Yields `None` when
    /// the channel shuts down or the peer closes.
    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // A requested shutdown ends the stream cleanly.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled AllocatorRequestStream after completion");
        }
        // Decode into thread-local scratch buffers to avoid per-message
        // allocation.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closed: terminate the stream rather than error.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))));
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                // Dispatch on the method ordinal from the transaction header.
                std::task::Poll::Ready(Some(match header.ordinal {
                    // Allocator.AllocateNonSharedCollection (one-way).
                    0x5ca681f025a80e44 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorAllocateNonSharedCollectionRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorAllocateNonSharedCollectionRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::AllocateNonSharedCollection {
                            payload: req,
                            control_handle,
                        })
                    }
                    // Allocator.AllocateSharedCollection (one-way).
                    0x11a19ff51f0b49c1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorAllocateSharedCollectionRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorAllocateSharedCollectionRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::AllocateSharedCollection {
                            payload: req,
                            control_handle,
                        })
                    }
                    // Allocator.BindSharedCollection (one-way).
                    0x550916b0dc1d5b4e => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorBindSharedCollectionRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorBindSharedCollectionRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::BindSharedCollection { payload: req, control_handle })
                    }
                    // Allocator.ValidateBufferCollectionToken (two-way):
                    // yields a responder carrying the request's tx_id.
                    0x4c5ee91b02a7e68d => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorValidateBufferCollectionTokenRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorValidateBufferCollectionTokenRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::ValidateBufferCollectionToken {
                            payload: req,
                            responder: AllocatorValidateBufferCollectionTokenResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Allocator.SetDebugClientInfo (one-way).
                    0x6f68f19a3f509c4d => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorSetDebugClientInfoRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::SetDebugClientInfo { payload: req, control_handle })
                    }
                    // Allocator.GetVmoInfo (two-way).
                    0x21a881120aa0ddf9 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorGetVmoInfoRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorGetVmoInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::GetVmoInfo {
                            payload: req,
                            responder: AllocatorGetVmoInfoResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Unknown flexible one-way method (tx_id == 0): surface it
                    // to the server as `_UnknownMethod` without replying.
                    _ if header.tx_id == 0
                        && header
                            .dynamic_flags()
                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        Ok(AllocatorRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: AllocatorControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::OneWay,
                        })
                    }
                    // Unknown flexible two-way method: automatically reply
                    // with the UnknownMethod framework error, then surface it.
                    _ if header
                        .dynamic_flags()
                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        this.inner.send_framework_err(
                            fidl::encoding::FrameworkErr::UnknownMethod,
                            header.tx_id,
                            header.ordinal,
                            header.dynamic_flags(),
                            (bytes, handles),
                        )?;
                        Ok(AllocatorRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: AllocatorControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::TwoWay,
                        })
                    }
                    // Strict unknown ordinal: protocol error.
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
1319
1320/// Allocates system memory buffers.
1321///
1322/// Epitaphs are not used in this protocol.
1323#[derive(Debug)]
1324pub enum AllocatorRequest {
1325 /// Allocates a buffer collection on behalf of a single client (aka
1326 /// initiator) who is also the only participant (from the point of view of
1327 /// sysmem).
1328 ///
1329 /// This call exists mainly for temp/testing purposes. This call skips the
1330 /// [`fuchsia.sysmem2/BufferCollectionToken`] stage, so there's no way to
1331 /// allow another participant to specify its constraints.
1332 ///
1333 /// Real clients are encouraged to use
1334 /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] instead, and to
1335 /// let relevant participants directly convey their own constraints to
1336 /// sysmem by sending `BufferCollectionToken`s to those participants.
1337 ///
1338 /// + request `collection_request` The server end of the
1339 /// [`fuchsia.sysmem2/BufferCollection`].
1340 AllocateNonSharedCollection {
1341 payload: AllocatorAllocateNonSharedCollectionRequest,
1342 control_handle: AllocatorControlHandle,
1343 },
1344 /// Creates a root [`fuchsia.sysmem2/BufferCollectionToken`].
1345 ///
1346 /// The `BufferCollectionToken` can be "duplicated" for distribution to
1347 /// participants by using
1348 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. Each
1349 /// `BufferCollectionToken` can be converted into a
    /// [`fuchsia.sysmem2/BufferCollection`] using
1351 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
1352 ///
1353 /// Buffer constraints can be set via
1354 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
1355 ///
1356 /// Success/failure to populate the buffer collection with buffers can be
1357 /// determined from
1358 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
1359 ///
1360 /// Closing the client end of a `BufferCollectionToken` or
1361 /// `BufferCollection` (without `Release` first) will fail all client ends
1362 /// in the same failure domain, which by default is all client ends of the
1363 /// buffer collection. See
1364 /// [`fuchsia.sysmem2/BufferCollection.SetDispensable`] and
1365 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] for ways to create
1366 /// separate failure domains within a buffer collection.
1367 AllocateSharedCollection {
1368 payload: AllocatorAllocateSharedCollectionRequest,
1369 control_handle: AllocatorControlHandle,
1370 },
1371 /// Convert a [`fuchsia.sysmem2/BufferCollectionToken`] into a
1372 /// [`fuchsia.sysmem2/BufferCollection`].
1373 ///
1374 /// At the time of sending this message, the buffer collection hasn't yet
1375 /// been populated with buffers - the participant must first also send
1376 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] via the
1377 /// `BufferCollection` client end.
1378 ///
1379 /// All `BufferCollectionToken`(s) duplicated from a root
1380 /// `BufferCollectionToken` (created via `AllocateSharedCollection`) must be
1381 /// "turned in" via `BindSharedCollection` (or `Release`ed), and all
1382 /// existing `BufferCollection` client ends must have sent `SetConstraints`
1383 /// before the logical BufferCollection will be populated with buffers (or
1384 /// will fail if the overall set of constraints can't be satisfied).
1385 ///
1386 /// + request `token` The client endpoint of a channel whose server end was
1387 /// sent to sysmem using
1388 /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] or whose server
1389 /// end was sent to sysmem using
1390 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. The token is
1391 /// being "turned in" in exchange for a
1392 /// [`fuchsia.sysmem2/BufferCollection`].
1393 /// + request `buffer_collection_request` The server end of a
1394 /// [`fuchsia.sysmem2/BufferCollection`] channel. The sender retains the
1395 /// client end. The `BufferCollection` channel is a single participant's
1396 /// connection to the logical buffer collection. Typically there will be
1397 /// other participants with their own `BufferCollection` channel to the
1398 /// logical buffer collection.
1399 BindSharedCollection {
1400 payload: AllocatorBindSharedCollectionRequest,
1401 control_handle: AllocatorControlHandle,
1402 },
1403 /// Checks whether a [`fuchsia.sysmem2/BufferCollectionToken`] is known to
1404 /// the sysmem server.
1405 ///
1406 /// With this call, the client can determine whether an incoming token is a
1407 /// real sysmem token that is known to the sysmem server, without any risk
1408 /// of getting stuck waiting forever on a potentially fake token to complete
1409 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or
1410 /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] (or any other two-way
1411 /// FIDL message). In cases where the client trusts the source of the token
1412 /// to provide a real token, this call is not typically needed outside of
1413 /// debugging.
1414 ///
1415 /// If the validate fails sometimes but succeeds other times, the source of
1416 /// the token may itself not be calling
1417 /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] or
1418 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after creating/duplicating the
1419 /// token but before sending the token to the current client. It may be more
1420 /// convenient for the source to use
1421 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] to duplicate
1422 /// token(s), since that call has the sync step built in. Or, the buffer
1423 /// collection may be failing before this call is processed by the sysmem
1424 /// server, as buffer collection failure cleans up sysmem's tracking of
1425 /// associated tokens.
1426 ///
1427 /// This call has no effect on any token.
1428 ///
1429 /// + request `token_server_koid` The koid of the server end of a channel
1430 /// that might be a BufferCollectionToken channel. This can be obtained
1431 /// via `zx_object_get_info` `ZX_INFO_HANDLE_BASIC` `related_koid`.
1432 /// - response `is_known` true means sysmem knew of the token at the time
1433 /// sysmem processed the request, but doesn't guarantee that the token is
1434 /// still valid by the time the client receives the reply. What it does
1435 /// guarantee is that the token at least was a real token, so a two-way
1436 /// call to the token won't stall forever (will fail or succeed fairly
1437 /// quickly, not stall). This can already be known implicitly if the
1438 /// source of the token can be trusted to provide a real token. A false
1439 /// value means the token wasn't known to sysmem at the time sysmem
1440 /// processed this call, but the token may have previously been valid, or
1441 /// may yet become valid. Or if the sender of the token isn't trusted to
1442 /// provide a real token, the token may be fake. It's the responsibility
1443 /// of the sender to sync with sysmem to ensure that previously
1444 /// created/duplicated token(s) are known to sysmem, before sending the
1445 /// token(s) to other participants.
1446 ValidateBufferCollectionToken {
1447 payload: AllocatorValidateBufferCollectionTokenRequest,
1448 responder: AllocatorValidateBufferCollectionTokenResponder,
1449 },
1450 /// Set information about the current client that can be used by sysmem to
1451 /// help diagnose leaking memory and allocation stalls waiting for a
1452 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
1453 ///
1454 /// This sets the debug client info on all [`fuchsia.sysmem2/Node`](s)
1455 /// subsequently created by this this [`fuchsia.sysmem2/Allocator`]
1456 /// including any [`fuchsia.sysmem2/BufferCollection`](s) created via
1457 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] (in the absence of
1458 /// any prior call to [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`],
1459 /// these `BufferCollection`(s) have the same initial debug client info as
1460 /// the token turned in to create the `BufferCollection`).
1461 ///
1462 /// This info can be subsequently overridden on a per-`Node` basis by
1463 /// sending [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
1464 ///
1465 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
1466 /// `Allocator` is the most efficient way to ensure that all
1467 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
1468 /// set, and is also more efficient than separately sending the same debug
1469 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
1470 /// created [`fuchsia.sysmem2/Node`].
1471 ///
1472 /// + request `name` This can be an arbitrary string, but the current
1473 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
1474 /// + request `id` This can be an arbitrary id, but the current process ID
1475 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
1476 SetDebugClientInfo {
1477 payload: AllocatorSetDebugClientInfoRequest,
1478 control_handle: AllocatorControlHandle,
1479 },
1480 /// Given a handle to a sysmem-provided VMO, this returns additional info
1481 /// about the corresponding sysmem logical buffer.
1482 ///
1483 /// Most callers will duplicate a VMO handle first and send the duplicate to
1484 /// this call.
1485 ///
1486 /// If the client has created a child VMO of a sysmem-provided VMO, that
1487 /// child VMO isn't considered a "sysmem VMO" for purposes of this call.
1488 ///
1489 /// + request `vmo` A handle to a sysmem-provided VMO (or see errors).
1490 /// - response `buffer_collection_id` The buffer collection ID, which is
1491 /// unique per logical buffer collection per boot.
1492 /// - response `buffer_index` The buffer index of the buffer within the
1493 /// buffer collection. This is the same as the index of the buffer within
1494 /// [`fuchsia.sysmem2/BufferCollectionInfo.buffers`]. The `buffer_index`
1495 /// is the same for all sysmem-delivered VMOs corresponding to the same
1496 /// logical buffer, even if the VMO koids differ. The `buffer_index` is
1497 /// only unique across buffers of a buffer collection. For a given buffer,
1498 /// the combination of `buffer_collection_id` and `buffer_index` is unique
1499 /// per boot.
1500 /// - response `close_weak_asap` Iff `vmo` is a handle to a weak sysmem VMO,
1501 /// the `close_weak_asap` field will be set in the response. This handle
1502 /// will signal `ZX_EVENTPAIR_PEER_CLOSED` when all weak VMO handles to
1503 /// the buffer should be closed as soon as possible. This is signalled
1504 /// shortly after all strong sysmem VMOs to the buffer are closed
1505 /// (including any held indirectly via strong `BufferCollectionToken` or
1506 /// strong `BufferCollection`). Failure to close all weak sysmem VMO
1507 /// handles to the buffer quickly upon `ZX_EVENTPAIR_PEER_CLOSED` is
1508 /// considered a VMO leak caused by the client still holding a weak sysmem
1509 /// VMO handle and results in loud complaints to the log by sysmem. The
1510 /// buffers of a collection can be freed independently of each other. The
1511 /// `ZX_EVENTPAIR_PEER_CLOSED` may already be signalled before the
1512 /// response arrives at the client. A client that isn't prepared to handle
1513 /// weak sysmem VMOs, on seeing this field set, can close all handles to
1514 /// the buffer and fail any associated request.
1515 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` - the vmo isn't a sysmem
1516 /// VMO. Both strong and weak sysmem VMOs can be passed to this call, and
1517 /// the VMO handle passed in to this call itself keeps the VMO's info
1518 /// alive for purposes of responding to this call. Because of this,
1519 /// ZX_ERR_NOT_FOUND errors are unambiguous (even if there are no other
1520 /// handles to the VMO when calling; even if other handles are closed
1521 /// before the GetVmoInfo response arrives at the client).
1522 /// * error `[fuchsia.sysmem2/Error.HANDLE_ACCESS_DENIED]` The vmo isn't
1523 /// capable of being used with GetVmoInfo due to rights/capability
1524 /// attenuation. The VMO needs to be usable with [`zx_vmo_get_info`] with
1525 /// topic [`ZX_INFO_HANDLE_BASIC`].
1526 /// * error `[fuchsia.sysmem2/Error.UNSPECIFIED]` The request failed for an
1527 /// unspecified reason. See the log for more info.
1528 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The vmo field
1529 /// wasn't set, or there was some other problem with the request field(s).
1530 GetVmoInfo { payload: AllocatorGetVmoInfoRequest, responder: AllocatorGetVmoInfoResponder },
1531 /// An interaction was received which does not match any known method.
1532 #[non_exhaustive]
1533 _UnknownMethod {
1534 /// Ordinal of the method that was called.
1535 ordinal: u64,
1536 control_handle: AllocatorControlHandle,
1537 method_type: fidl::MethodType,
1538 },
1539}
1540
1541impl AllocatorRequest {
1542 #[allow(irrefutable_let_patterns)]
1543 pub fn into_allocate_non_shared_collection(
1544 self,
1545 ) -> Option<(AllocatorAllocateNonSharedCollectionRequest, AllocatorControlHandle)> {
1546 if let AllocatorRequest::AllocateNonSharedCollection { payload, control_handle } = self {
1547 Some((payload, control_handle))
1548 } else {
1549 None
1550 }
1551 }
1552
1553 #[allow(irrefutable_let_patterns)]
1554 pub fn into_allocate_shared_collection(
1555 self,
1556 ) -> Option<(AllocatorAllocateSharedCollectionRequest, AllocatorControlHandle)> {
1557 if let AllocatorRequest::AllocateSharedCollection { payload, control_handle } = self {
1558 Some((payload, control_handle))
1559 } else {
1560 None
1561 }
1562 }
1563
1564 #[allow(irrefutable_let_patterns)]
1565 pub fn into_bind_shared_collection(
1566 self,
1567 ) -> Option<(AllocatorBindSharedCollectionRequest, AllocatorControlHandle)> {
1568 if let AllocatorRequest::BindSharedCollection { payload, control_handle } = self {
1569 Some((payload, control_handle))
1570 } else {
1571 None
1572 }
1573 }
1574
1575 #[allow(irrefutable_let_patterns)]
1576 pub fn into_validate_buffer_collection_token(
1577 self,
1578 ) -> Option<(
1579 AllocatorValidateBufferCollectionTokenRequest,
1580 AllocatorValidateBufferCollectionTokenResponder,
1581 )> {
1582 if let AllocatorRequest::ValidateBufferCollectionToken { payload, responder } = self {
1583 Some((payload, responder))
1584 } else {
1585 None
1586 }
1587 }
1588
1589 #[allow(irrefutable_let_patterns)]
1590 pub fn into_set_debug_client_info(
1591 self,
1592 ) -> Option<(AllocatorSetDebugClientInfoRequest, AllocatorControlHandle)> {
1593 if let AllocatorRequest::SetDebugClientInfo { payload, control_handle } = self {
1594 Some((payload, control_handle))
1595 } else {
1596 None
1597 }
1598 }
1599
1600 #[allow(irrefutable_let_patterns)]
1601 pub fn into_get_vmo_info(
1602 self,
1603 ) -> Option<(AllocatorGetVmoInfoRequest, AllocatorGetVmoInfoResponder)> {
1604 if let AllocatorRequest::GetVmoInfo { payload, responder } = self {
1605 Some((payload, responder))
1606 } else {
1607 None
1608 }
1609 }
1610
1611 /// Name of the method defined in FIDL
1612 pub fn method_name(&self) -> &'static str {
1613 match *self {
1614 AllocatorRequest::AllocateNonSharedCollection { .. } => {
1615 "allocate_non_shared_collection"
1616 }
1617 AllocatorRequest::AllocateSharedCollection { .. } => "allocate_shared_collection",
1618 AllocatorRequest::BindSharedCollection { .. } => "bind_shared_collection",
1619 AllocatorRequest::ValidateBufferCollectionToken { .. } => {
1620 "validate_buffer_collection_token"
1621 }
1622 AllocatorRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
1623 AllocatorRequest::GetVmoInfo { .. } => "get_vmo_info",
1624 AllocatorRequest::_UnknownMethod { method_type: fidl::MethodType::OneWay, .. } => {
1625 "unknown one-way method"
1626 }
1627 AllocatorRequest::_UnknownMethod { method_type: fidl::MethodType::TwoWay, .. } => {
1628 "unknown two-way method"
1629 }
1630 }
1631 }
1632}
1633
/// Server-side handle for the `Allocator` channel: allows shutting the
/// channel down (optionally with an epitaph) and observing/ signalling peer
/// state, independent of any particular in-flight request.
#[derive(Debug, Clone)]
pub struct AllocatorControlHandle {
    // Shared server connection state; `Clone` only bumps the `Arc` refcount,
    // so all clones control the same underlying channel.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
1638
// All trait methods delegate directly to the shared `ServeInner` / channel.
impl fidl::endpoints::ControlHandle for AllocatorControlHandle {
    fn shutdown(&self) {
        self.inner.shutdown()
    }

    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Peer signalling requires a real Zircon channel, hence fuchsia-only.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
1665
// Intentionally empty: presumably the Allocator protocol declares no events
// for the server to send — TODO(review): confirm against the FIDL definition.
impl AllocatorControlHandle {}
1667
/// Responder for the two-way `Allocator.ValidateBufferCollectionToken`
/// method; must be consumed by `send`/`send_no_shutdown_on_err` (or
/// deliberately discarded via `drop_without_shutdown`).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct AllocatorValidateBufferCollectionTokenResponder {
    // `ManuallyDrop` lets `drop_without_shutdown`/`Drop` each release the
    // handle exactly once along their respective paths.
    control_handle: std::mem::ManuallyDrop<AllocatorControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
1674
/// Sets the channel to be shutdown (see [`AllocatorControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for AllocatorValidateBufferCollectionTokenResponder {
    fn drop(&mut self) {
        // A dropped-without-reply responder means the request can never be
        // answered; close the channel so the client observes failure.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
1685
impl fidl::endpoints::Responder for AllocatorValidateBufferCollectionTokenResponder {
    type ControlHandle = AllocatorControlHandle;

    // Borrow the control handle without consuming the responder.
    fn control_handle(&self) -> &AllocatorControlHandle {
        &self.control_handle
    }

    // Discards the responder WITHOUT triggering the `Drop` impl's channel
    // shutdown: drop the inner handle manually, then forget `self`.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
1700
1701impl AllocatorValidateBufferCollectionTokenResponder {
1702 /// Sends a response to the FIDL transaction.
1703 ///
1704 /// Sets the channel to shutdown if an error occurs.
1705 pub fn send(
1706 self,
1707 mut payload: &AllocatorValidateBufferCollectionTokenResponse,
1708 ) -> Result<(), fidl::Error> {
1709 let _result = self.send_raw(payload);
1710 if _result.is_err() {
1711 self.control_handle.shutdown();
1712 }
1713 self.drop_without_shutdown();
1714 _result
1715 }
1716
1717 /// Similar to "send" but does not shutdown the channel if an error occurs.
1718 pub fn send_no_shutdown_on_err(
1719 self,
1720 mut payload: &AllocatorValidateBufferCollectionTokenResponse,
1721 ) -> Result<(), fidl::Error> {
1722 let _result = self.send_raw(payload);
1723 self.drop_without_shutdown();
1724 _result
1725 }
1726
1727 fn send_raw(
1728 &self,
1729 mut payload: &AllocatorValidateBufferCollectionTokenResponse,
1730 ) -> Result<(), fidl::Error> {
1731 self.control_handle.inner.send::<fidl::encoding::FlexibleType<
1732 AllocatorValidateBufferCollectionTokenResponse,
1733 >>(
1734 fidl::encoding::Flexible::new(payload),
1735 self.tx_id,
1736 0x4c5ee91b02a7e68d,
1737 fidl::encoding::DynamicFlags::FLEXIBLE,
1738 )
1739 }
1740}
1741
/// Responder for the two-way `Allocator.GetVmoInfo` method; must be consumed
/// by `send`/`send_no_shutdown_on_err` (or deliberately discarded via
/// `drop_without_shutdown`).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct AllocatorGetVmoInfoResponder {
    // `ManuallyDrop` lets `drop_without_shutdown`/`Drop` each release the
    // handle exactly once along their respective paths.
    control_handle: std::mem::ManuallyDrop<AllocatorControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
1748
/// Sets the channel to be shutdown (see [`AllocatorControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for AllocatorGetVmoInfoResponder {
    fn drop(&mut self) {
        // A dropped-without-reply responder means the request can never be
        // answered; close the channel so the client observes failure.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
1759
impl fidl::endpoints::Responder for AllocatorGetVmoInfoResponder {
    type ControlHandle = AllocatorControlHandle;

    // Borrow the control handle without consuming the responder.
    fn control_handle(&self) -> &AllocatorControlHandle {
        &self.control_handle
    }

    // Discards the responder WITHOUT triggering the `Drop` impl's channel
    // shutdown: drop the inner handle manually, then forget `self`.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
1774
1775impl AllocatorGetVmoInfoResponder {
1776 /// Sends a response to the FIDL transaction.
1777 ///
1778 /// Sets the channel to shutdown if an error occurs.
1779 pub fn send(
1780 self,
1781 mut result: Result<AllocatorGetVmoInfoResponse, Error>,
1782 ) -> Result<(), fidl::Error> {
1783 let _result = self.send_raw(result);
1784 if _result.is_err() {
1785 self.control_handle.shutdown();
1786 }
1787 self.drop_without_shutdown();
1788 _result
1789 }
1790
1791 /// Similar to "send" but does not shutdown the channel if an error occurs.
1792 pub fn send_no_shutdown_on_err(
1793 self,
1794 mut result: Result<AllocatorGetVmoInfoResponse, Error>,
1795 ) -> Result<(), fidl::Error> {
1796 let _result = self.send_raw(result);
1797 self.drop_without_shutdown();
1798 _result
1799 }
1800
1801 fn send_raw(
1802 &self,
1803 mut result: Result<AllocatorGetVmoInfoResponse, Error>,
1804 ) -> Result<(), fidl::Error> {
1805 self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
1806 AllocatorGetVmoInfoResponse,
1807 Error,
1808 >>(
1809 fidl::encoding::FlexibleResult::new(result.as_mut().map_err(|e| *e)),
1810 self.tx_id,
1811 0x21a881120aa0ddf9,
1812 fidl::encoding::DynamicFlags::FLEXIBLE,
1813 )
1814 }
1815}
1816
/// Zero-sized marker identifying the `BufferCollection` FIDL protocol at the
/// type level; used to parameterize endpoints, proxies, and request streams.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct BufferCollectionMarker;
1819
impl fidl::endpoints::ProtocolMarker for BufferCollectionMarker {
    type Proxy = BufferCollectionProxy;
    type RequestStream = BufferCollectionRequestStream;
    // The synchronous proxy needs real Zircon channels, hence fuchsia-only.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = BufferCollectionSynchronousProxy;

    // "(anonymous)" because the protocol is not `@discoverable`; this string
    // appears in error messages and debug logs.
    const DEBUG_NAME: &'static str = "(anonymous) BufferCollection";
}
/// Result type of `BufferCollection.WaitForAllBuffersAllocated`.
pub type BufferCollectionWaitForAllBuffersAllocatedResult =
    Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>;
/// Result type of `BufferCollection.CheckAllBuffersAllocated`.
pub type BufferCollectionCheckAllBuffersAllocatedResult = Result<(), Error>;
1831
/// Client-side interface for the `BufferCollection` protocol. Two-way methods
/// return an associated future type; one-way methods return immediately with
/// the result of queueing the message. Implemented by the generated async
/// proxy (and mockable in tests).
pub trait BufferCollectionProxyInterface: Send + Sync {
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    // One-way `Node` methods (composed into `BufferCollection`).
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
    // `BufferCollection`-specific methods.
    fn r#set_constraints(
        &self,
        payload: BufferCollectionSetConstraintsRequest,
    ) -> Result<(), fidl::Error>;
    type WaitForAllBuffersAllocatedResponseFut: std::future::Future<
        Output = Result<BufferCollectionWaitForAllBuffersAllocatedResult, fidl::Error>,
    > + Send;
    fn r#wait_for_all_buffers_allocated(&self) -> Self::WaitForAllBuffersAllocatedResponseFut;
    type CheckAllBuffersAllocatedResponseFut: std::future::Future<
        Output = Result<BufferCollectionCheckAllBuffersAllocatedResult, fidl::Error>,
    > + Send;
    fn r#check_all_buffers_allocated(&self) -> Self::CheckAllBuffersAllocatedResponseFut;
    fn r#attach_token(
        &self,
        payload: BufferCollectionAttachTokenRequest,
    ) -> Result<(), fidl::Error>;
    fn r#attach_lifetime_tracking(
        &self,
        payload: BufferCollectionAttachLifetimeTrackingRequest,
    ) -> Result<(), fidl::Error>;
}
/// Blocking (synchronous) client for the `BufferCollection` protocol; only
/// available on Fuchsia, where real Zircon channels exist.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct BufferCollectionSynchronousProxy {
    // Underlying synchronous FIDL client over the channel.
    client: fidl::client::sync::Client,
}
1890
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for BufferCollectionSynchronousProxy {
    type Proxy = BufferCollectionProxy;
    type Protocol = BufferCollectionMarker;

    // Wraps a raw channel; delegates to the inherent `new`.
    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    // Consumes the proxy, returning the underlying channel.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    // Borrows the underlying channel without consuming the proxy.
    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
1908
1909#[cfg(target_os = "fuchsia")]
1910impl BufferCollectionSynchronousProxy {
1911 pub fn new(channel: fidl::Channel) -> Self {
1912 let protocol_name = <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
1913 Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
1914 }
1915
    /// Consumes the proxy and returns the underlying channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }
1919
1920 /// Waits until an event arrives and returns it. It is safe for other
1921 /// threads to make concurrent requests while waiting for an event.
1922 pub fn wait_for_event(
1923 &self,
1924 deadline: zx::MonotonicInstant,
1925 ) -> Result<BufferCollectionEvent, fidl::Error> {
1926 BufferCollectionEvent::decode(self.client.wait_for_event(deadline)?)
1927 }
1928
1929 /// Ensure that previous messages have been received server side. This is
1930 /// particularly useful after previous messages that created new tokens,
1931 /// because a token must be known to the sysmem server before sending the
1932 /// token to another participant.
1933 ///
1934 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
1935 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
1936 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
1937 /// to mitigate the possibility of a hostile/fake
1938 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
1939 /// Another way is to pass the token to
1940 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
1941 /// the token as part of exchanging it for a
1942 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
1943 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
1944 /// of stalling.
1945 ///
1946 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
1947 /// and then starting and completing a `Sync`, it's then safe to send the
1948 /// `BufferCollectionToken` client ends to other participants knowing the
1949 /// server will recognize the tokens when they're sent by the other
1950 /// participants to sysmem in a
1951 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
1952 /// efficient way to create tokens while avoiding unnecessary round trips.
1953 ///
1954 /// Other options include waiting for each
1955 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
1956 /// individually (using separate call to `Sync` after each), or calling
1957 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
1958 /// converted to a `BufferCollection` via
1959 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
1960 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
1961 /// the sync step and can create multiple tokens at once.
1962 pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
1963 let _response = self.client.send_query::<
1964 fidl::encoding::EmptyPayload,
1965 fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
1966 >(
1967 (),
1968 0x11ac2555cf575b54,
1969 fidl::encoding::DynamicFlags::FLEXIBLE,
1970 ___deadline,
1971 )?
1972 .into_result::<BufferCollectionMarker>("sync")?;
1973 Ok(_response)
1974 }
1975
1976 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
1977 ///
1978 /// Normally a participant will convert a `BufferCollectionToken` into a
1979 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
1980 /// `Release` via the token (and then close the channel immediately or
1981 /// shortly later in response to server closing the server end), which
1982 /// avoids causing buffer collection failure. Without a prior `Release`,
1983 /// closing the `BufferCollectionToken` client end will cause buffer
1984 /// collection failure.
1985 ///
1986 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
1987 ///
1988 /// By default the server handles unexpected closure of a
1989 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
1990 /// first) by failing the buffer collection. Partly this is to expedite
1991 /// closing VMO handles to reclaim memory when any participant fails. If a
1992 /// participant would like to cleanly close a `BufferCollection` without
1993 /// causing buffer collection failure, the participant can send `Release`
1994 /// before closing the `BufferCollection` client end. The `Release` can
1995 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
1996 /// buffer collection won't require constraints from this node in order to
1997 /// allocate. If after `SetConstraints`, the constraints are retained and
1998 /// aggregated, despite the lack of `BufferCollection` connection at the
1999 /// time of constraints aggregation.
2000 ///
2001 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
2002 ///
2003 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
2004 /// end (without `Release` first) will trigger failure of the buffer
2005 /// collection. To close a `BufferCollectionTokenGroup` channel without
2006 /// failing the buffer collection, ensure that AllChildrenPresent() has been
2007 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
2008 /// client end.
2009 ///
2010 /// If `Release` occurs before
2011 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent], the
2012 /// buffer collection will fail (triggered by reception of `Release` without
2013 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
2014 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
2015 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
2016 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
2017 /// close requires `AllChildrenPresent` (if not already sent), then
2018 /// `Release`, then close client end.
2019 ///
2020 /// If `Release` occurs after `AllChildrenPresent`, the children and all
2021 /// their constraints remain intact (just as they would if the
2022 /// `BufferCollectionTokenGroup` channel had remained open), and the client
2023 /// end close doesn't trigger buffer collection failure.
2024 ///
2025 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
2026 ///
2027 /// For brevity, the per-channel-protocol paragraphs above ignore the
2028 /// separate failure domain created by
2029 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
2030 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
2031 /// unexpectedly closes (without `Release` first) and that client end is
2032 /// under a failure domain, instead of failing the whole buffer collection,
2033 /// the failure domain is failed, but the buffer collection itself is
2034 /// isolated from failure of the failure domain. Such failure domains can be
2035 /// nested, in which case only the inner-most failure domain in which the
2036 /// `Node` resides fails.
2037 pub fn r#release(&self) -> Result<(), fidl::Error> {
2038 self.client.send::<fidl::encoding::EmptyPayload>(
2039 (),
2040 0x6a5cae7d6d6e04c6,
2041 fidl::encoding::DynamicFlags::FLEXIBLE,
2042 )
2043 }
2044
2045 /// Set a name for VMOs in this buffer collection.
2046 ///
2047 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
2048 /// will be truncated to fit. The name of the vmo will be suffixed with the
2049 /// buffer index within the collection (if the suffix fits within
2050 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
2051 /// listed in the inspect data.
2052 ///
2053 /// The name only affects VMOs allocated after the name is set; this call
2054 /// does not rename existing VMOs. If multiple clients set different names
2055 /// then the larger priority value will win. Setting a new name with the
2056 /// same priority as a prior name doesn't change the name.
2057 ///
2058 /// All table fields are currently required.
2059 ///
2060 /// + request `priority` The name is only set if this is the first `SetName`
2061 /// or if `priority` is greater than any previous `priority` value in
2062 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
2063 /// + request `name` The name for VMOs created under this buffer collection.
2064 pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
2065 self.client.send::<NodeSetNameRequest>(
2066 payload,
2067 0xb41f1624f48c1e9,
2068 fidl::encoding::DynamicFlags::FLEXIBLE,
2069 )
2070 }
2071
2072 /// Set information about the current client that can be used by sysmem to
2073 /// help diagnose leaking memory and allocation stalls waiting for a
2074 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
2075 ///
2076 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
2077 /// `Node`(s) derived from this `Node`, unless overriden by
2078 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
2079 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
2080 ///
2081 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
2082 /// `Allocator` is the most efficient way to ensure that all
2083 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
2084 /// set, and is also more efficient than separately sending the same debug
2085 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
2086 /// created [`fuchsia.sysmem2/Node`].
2087 ///
2088 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
2089 /// indicate which client is closing their channel first, leading to subtree
2090 /// failure (which can be normal if the purpose of the subtree is over, but
2091 /// if happening earlier than expected, the client-channel-specific name can
2092 /// help diagnose where the failure is first coming from, from sysmem's
2093 /// point of view).
2094 ///
2095 /// All table fields are currently required.
2096 ///
2097 /// + request `name` This can be an arbitrary string, but the current
2098 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
2099 /// + request `id` This can be an arbitrary id, but the current process ID
2100 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
2101 pub fn r#set_debug_client_info(
2102 &self,
2103 mut payload: &NodeSetDebugClientInfoRequest,
2104 ) -> Result<(), fidl::Error> {
2105 self.client.send::<NodeSetDebugClientInfoRequest>(
2106 payload,
2107 0x5cde8914608d99b1,
2108 fidl::encoding::DynamicFlags::FLEXIBLE,
2109 )
2110 }
2111
2112 /// Sysmem logs a warning if sysmem hasn't seen
2113 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
2114 /// within 5 seconds after creation of a new collection.
2115 ///
2116 /// Clients can call this method to change when the log is printed. If
2117 /// multiple client set the deadline, it's unspecified which deadline will
2118 /// take effect.
2119 ///
2120 /// In most cases the default works well.
2121 ///
2122 /// All table fields are currently required.
2123 ///
2124 /// + request `deadline` The time at which sysmem will start trying to log
2125 /// the warning, unless all constraints are with sysmem by then.
2126 pub fn r#set_debug_timeout_log_deadline(
2127 &self,
2128 mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
2129 ) -> Result<(), fidl::Error> {
2130 self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
2131 payload,
2132 0x716b0af13d5c0806,
2133 fidl::encoding::DynamicFlags::FLEXIBLE,
2134 )
2135 }
2136
2137 /// This enables verbose logging for the buffer collection.
2138 ///
2139 /// Verbose logging includes constraints set via
2140 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
2141 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
2142 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
2143 /// the tree of `Node`(s).
2144 ///
2145 /// Normally sysmem prints only a single line complaint when aggregation
2146 /// fails, with just the specific detailed reason that aggregation failed,
2147 /// with little surrounding context. While this is often enough to diagnose
2148 /// a problem if only a small change was made and everything was working
2149 /// before the small change, it's often not particularly helpful for getting
2150 /// a new buffer collection to work for the first time. Especially with
2151 /// more complex trees of nodes, involving things like
2152 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
2153 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
2154 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
2155 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
2156 /// looks like and why it's failing a logical allocation, or why a tree or
2157 /// subtree is failing sooner than expected.
2158 ///
2159 /// The intent of the extra logging is to be acceptable from a performance
2160 /// point of view, under the assumption that verbose logging is only enabled
2161 /// on a low number of buffer collections. If we're not tracking down a bug,
2162 /// we shouldn't send this message.
2163 pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
2164 self.client.send::<fidl::encoding::EmptyPayload>(
2165 (),
2166 0x5209c77415b4dfad,
2167 fidl::encoding::DynamicFlags::FLEXIBLE,
2168 )
2169 }
2170
    /// This gets a handle that can be used as a parameter to
    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
    /// client obtained this handle from this `Node`.
    ///
    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
    /// despite the two calls typically being on different channels.
    ///
    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
    ///
    /// All table fields are currently required.
    ///
    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
    ///   different `Node` channel, to prove that the client obtained the handle
    ///   from this `Node`.
    pub fn r#get_node_ref(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
        // Two-way call: blocks the calling thread until the response arrives
        // or `___deadline` passes.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
        >(
            (),
            0x5b3d0e51614df053, // method ordinal, generated by fidlgen
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<BufferCollectionMarker>("get_node_ref")?;
        Ok(_response)
    }
2203
    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
    /// rooted at a different child token of a common parent
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
    /// passed-in `node_ref`.
    ///
    /// This call is for assisting with admission control de-duplication, and
    /// with debugging.
    ///
    /// The `node_ref` must be obtained using
    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
    ///
    /// The `node_ref` can be a duplicated handle; it's not necessary to call
    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
    ///
    /// If a calling token may not actually be a valid token at all due to a
    /// potentially hostile/untrusted provider of the token, call
    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
    /// never responds due to a calling token not being a real token (not really
    /// talking to sysmem). Another option is to call
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
    /// which also validates the token along with converting it to a
    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
    ///
    /// All table fields are currently required.
    ///
    /// - response `is_alternate`
    ///   - true: The first parent node in common between the calling node and
    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
    ///     that the calling `Node` and the `node_ref` `Node` will not have both
    ///     their constraints apply - rather sysmem will choose one or the other
    ///     of the constraints - never both. This is because only one child of
    ///     a `BufferCollectionTokenGroup` is selected during logical
    ///     allocation, with only that one child's subtree contributing to
    ///     constraints aggregation.
    ///   - false: The first parent node in common between the calling `Node`
    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
    ///     Currently, this means the first parent node in common is a
    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
    ///     `Release`ed). This means that the calling `Node` and the `node_ref`
    ///     `Node` may have both their constraints apply during constraints
    ///     aggregation of the logical allocation, if both `Node`(s) are
    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
    ///     this case, there is no `BufferCollectionTokenGroup` that will
    ///     directly prevent the two `Node`(s) from both being selected and
    ///     their constraints both aggregated, but even when false, one or both
    ///     `Node`(s) may still be eliminated from consideration if one or both
    ///     `Node`(s) has a direct or indirect parent
    ///     `BufferCollectionTokenGroup` which selects a child subtree other
    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
    ///   associated with the same buffer collection as the calling `Node`.
    ///   Another reason for this error is if the `node_ref` is an
    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
    ///   a real `node_ref` obtained from `GetNodeRef`.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
    ///   the needed rights expected on a real `node_ref`.
    /// * No other failing status codes are returned by this call. However,
    ///   sysmem may add additional codes in future, so the client should have
    ///   sensible default handling for any failing status code.
    pub fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeIsAlternateForResult, fidl::Error> {
        // Two-way call: blocks until the response arrives or `___deadline`
        // passes. `payload` is consumed (its handles move to the wire).
        let _response = self.client.send_query::<
            NodeIsAlternateForRequest,
            fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
        >(
            &mut payload,
            0x3a58e00157e0825, // method ordinal, generated by fidlgen
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<BufferCollectionMarker>("is_alternate_for")?;
        Ok(_response.map(|x| x))
    }
2282
    /// Get the buffer collection ID. This ID is also available from
    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
    /// within the collection).
    ///
    /// This call is mainly useful in situations where we can't convey a
    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
    /// handle, which can be joined back up with a `BufferCollection` client end
    /// that was created via a different path. Prefer to convey a
    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
    ///
    /// Trusting a `buffer_collection_id` value from a source other than sysmem
    /// is analogous to trusting a koid value from a source other than zircon.
    /// Both should be avoided unless really necessary, and both require
    /// caution. In some situations it may be reasonable to refer to a
    /// pre-established `BufferCollection` by `buffer_collection_id` via a
    /// protocol for efficiency reasons, but an incoming value purporting to be
    /// a `buffer_collection_id` is not sufficient alone to justify granting the
    /// sender of the `buffer_collection_id` any capability. The sender must
    /// first prove to a receiver that the sender has/had a VMO or has/had a
    /// `BufferCollectionToken` to the same collection by sending a handle that
    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
    /// `buffer_collection_id` value. The receiver should take care to avoid
    /// assuming that a sender had a `BufferCollectionToken` in cases where the
    /// sender has only proven that the sender had a VMO.
    ///
    /// - response `buffer_collection_id` This ID is unique per buffer
    ///   collection per boot. Each buffer is uniquely identified by the
    ///   `buffer_collection_id` and `buffer_index` together.
    pub fn r#get_buffer_collection_id(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
        // Two-way call: blocks until the response arrives or `___deadline`
        // passes.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
        >(
            (),
            0x77d19a494b78ba8c, // method ordinal, generated by fidlgen
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<BufferCollectionMarker>("get_buffer_collection_id")?;
        Ok(_response)
    }
2328
    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
    /// created after this message to weak, which means that a client's `Node`
    /// client end (or a child created after this message) is not alone
    /// sufficient to keep allocated VMOs alive.
    ///
    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
    /// `close_weak_asap`.
    ///
    /// This message is only permitted before the `Node` becomes ready for
    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
    /// * `BufferCollectionToken`: any time
    /// * `BufferCollection`: before `SetConstraints`
    /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
    ///
    /// Currently, no conversion from strong `Node` to weak `Node` after ready
    /// for allocation is provided, but a client can simulate that by creating
    /// an additional `Node` before allocation and setting that additional
    /// `Node` to weak, and then potentially at some point later sending
    /// `Release` and closing the client end of the client's strong `Node`, but
    /// keeping the client's weak `Node`.
    ///
    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
    /// collection failure (all `Node` client end(s) will see
    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
    /// this situation until all `Node`(s) are ready for allocation. For initial
    /// allocation to succeed, at least one strong `Node` is required to exist
    /// at allocation time, but after that client receives VMO handles, that
    /// client can `BufferCollection.Release` and close the client end without
    /// causing this type of failure.
    ///
    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
    /// separately as appropriate.
    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
        // One-way (fire-and-forget) call: no response is read.
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1, // method ordinal, generated by fidlgen
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2370
    /// This indicates to sysmem that the client is prepared to pay attention to
    /// `close_weak_asap`.
    ///
    /// If sent, this message must be before
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
    /// send this message before `WaitForAllBuffersAllocated`, or a parent
    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
    /// trigger buffer collection failure.
    ///
    /// This message is necessary because weak sysmem VMOs have not always been
    /// a thing, so older clients are not aware of the need to pay attention to
    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
    /// sysmem weak VMO handles asap. By having this message and requiring
    /// participants to indicate their acceptance of this aspect of the overall
    /// protocol, we avoid situations where an older client is delivered a weak
    /// VMO without any way for sysmem to get that VMO to close quickly later
    /// (and on a per-buffer basis).
    ///
    /// A participant that doesn't handle `close_weak_asap` and also doesn't
    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
    /// same participant has a child/delegate which does retrieve VMOs, that
    /// child/delegate will need to send `SetWeakOk` before
    /// `WaitForAllBuffersAllocated`.
    ///
    /// + request `for_child_nodes_also` If present and true, this means direct
    ///   child nodes of this node created after this message plus all
    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
    ///   those nodes. Any child node of this node that was created before this
    ///   message is not included. This setting is "sticky" in the sense that a
    ///   subsequent `SetWeakOk` without this bool set to true does not reset
    ///   the server-side bool. If this creates a problem for a participant, a
    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
    ///   tokens instead, as appropriate. A participant should only set
    ///   `for_child_nodes_also` true if the participant can really promise to
    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
    ///   weak VMO handles held by participants holding the corresponding child
    ///   `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
    ///   which are using sysmem(1) can be weak, despite the clients of those
    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
    ///   direct way to find out about `close_weak_asap`. This only applies to
    ///   descendants of this `Node` which are using sysmem(1), not to this
    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
    ///   token, which will fail allocation unless an ancestor of this `Node`
    ///   specified `for_child_nodes_also` true.
    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        // One-way (fire-and-forget) call: no response is read.
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9, // method ordinal, generated by fidlgen
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2427
    /// The server_end will be closed after this `Node` and any child nodes have
    /// released their buffer counts, making those counts available for
    /// reservation by a different `Node` via
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
    ///
    /// The `Node` buffer counts may not be released until the entire tree of
    /// `Node`(s) is closed or failed, because
    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
    /// `Node` buffer counts remain reserved until the orphaned node is later
    /// cleaned up.
    ///
    /// If the `Node` exceeds a fairly large number of attached eventpair server
    /// ends, a log message will indicate this and the `Node` (and the
    /// appropriate) sub-tree will fail.
    ///
    /// The `server_end` will remain open when
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// This message can also be used with a
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
    pub fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        // One-way (fire-and-forget) call: no response is read. `payload` is
        // consumed (its eventpair handle moves to the wire).
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac, // method ordinal, generated by fidlgen
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2461
    /// Provide [`fuchsia.sysmem2/BufferCollectionConstraints`] to the buffer
    /// collection.
    ///
    /// A participant may only call
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] up to once per
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// For buffer allocation to be attempted, all holders of a
    /// `BufferCollection` client end need to call `SetConstraints` before
    /// sysmem will attempt to allocate buffers.
    ///
    /// + request `constraints` These are the constraints on the buffer
    ///   collection imposed by the sending client/participant. The
    ///   `constraints` field is not required to be set. If not set, the client
    ///   is not setting any actual constraints, but is indicating that the
    ///   client has no constraints to set. A client that doesn't set the
    ///   `constraints` field won't receive any VMO handles, but can still find
    ///   out how many buffers were allocated and can still refer to buffers by
    ///   their `buffer_index`.
    pub fn r#set_constraints(
        &self,
        mut payload: BufferCollectionSetConstraintsRequest,
    ) -> Result<(), fidl::Error> {
        // One-way (fire-and-forget) call: no response is read.
        self.client.send::<BufferCollectionSetConstraintsRequest>(
            &mut payload,
            0x1fde0f19d650197b, // method ordinal, generated by fidlgen
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2491
    /// Wait until all buffers are allocated.
    ///
    /// This FIDL call completes when buffers have been allocated, or completes
    /// with some failure detail if allocation has been attempted but failed.
    ///
    /// The following must occur before buffers will be allocated:
    /// * All [`fuchsia.sysmem2/BufferCollectionToken`](s) of the buffer
    ///   collection must be turned in via `BindSharedCollection` to get a
    ///   [`fuchsia.sysmem2/BufferCollection`] (for brevity, this is assuming
    ///   [`fuchsia.sysmem2/BufferCollection.AttachToken`] isn't being used),
    ///   or have had [`fuchsia.sysmem2/BufferCollectionToken.Release`] sent
    ///   to them.
    /// * All [`fuchsia.sysmem2/BufferCollection`](s) of the buffer collection
    ///   must have had [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
    ///   sent to them, or had [`fuchsia.sysmem2/BufferCollection.Release`]
    ///   sent to them.
    ///
    /// - result `buffer_collection_info` The VMO handles and other related
    ///   info.
    /// * error [`fuchsia.sysmem2/Error.NO_MEMORY`] The request is valid but
    ///   cannot be fulfilled due to resource exhaustion.
    /// * error [`fuchsia.sysmem2/Error.PROTOCOL_DEVIATION`] The request is
    ///   malformed.
    /// * error [`fuchsia.sysmem2/Error.CONSTRAINTS_INTERSECTION_EMPTY`] The
    ///   request is valid but cannot be satisfied, perhaps due to hardware
    ///   limitations. This can happen if participants have incompatible
    ///   constraints (empty intersection, roughly speaking). See the log for
    ///   more info. In cases where a participant could potentially be treated
    ///   as optional, see [`BufferCollectionTokenGroup`]. When using
    ///   [`fuchsia.sysmem2/BufferCollection.AttachToken`], this will be the
    ///   error code if there aren't enough buffers in the pre-existing
    ///   collection to satisfy the constraints set on the attached token and
    ///   any sub-tree of tokens derived from the attached token.
    pub fn r#wait_for_all_buffers_allocated(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<BufferCollectionWaitForAllBuffersAllocatedResult, fidl::Error> {
        // Two-way call: blocks until buffers are allocated (or allocation
        // fails), or until `___deadline` passes.
        let _response = self
            .client
            .send_query::<fidl::encoding::EmptyPayload, fidl::encoding::FlexibleResultType<
                BufferCollectionWaitForAllBuffersAllocatedResponse,
                Error,
            >>(
                (), 0x62300344b61404e, fidl::encoding::DynamicFlags::FLEXIBLE, ___deadline
            )?
            .into_result::<BufferCollectionMarker>("wait_for_all_buffers_allocated")?;
        Ok(_response.map(|x| x))
    }
2540
    /// Checks whether all the buffers have been allocated, in a polling
    /// fashion.
    ///
    /// * If the buffer collection has been allocated, returns success.
    /// * If the buffer collection failed allocation, returns the same
    ///   [`fuchsia.sysmem2/Error`] as
    ///   [`fuchsia.sysmem2/BufferCollection/WaitForAllBuffersAllocated`] would
    ///   return.
    /// * error [`fuchsia.sysmem2/Error.PENDING`] The buffer collection hasn't
    ///   attempted allocation yet. This means that WaitForAllBuffersAllocated
    ///   would not respond quickly.
    pub fn r#check_all_buffers_allocated(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<BufferCollectionCheckAllBuffersAllocatedResult, fidl::Error> {
        // Two-way call with an empty success payload; unlike
        // WaitForAllBuffersAllocated this does not transfer any VMO handles.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
        >(
            (),
            0x35a5fe77ce939c10, // method ordinal, generated by fidlgen
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<BufferCollectionMarker>("check_all_buffers_allocated")?;
        Ok(_response.map(|x| x))
    }
2568
    /// Create a new token to add a new participant to an existing logical
    /// buffer collection, if the existing collection's buffer counts,
    /// constraints, and participants allow.
    ///
    /// This can be useful in replacing a failed participant, and/or in
    /// adding/re-adding a participant after buffers have already been
    /// allocated.
    ///
    /// When [`fuchsia.sysmem2/BufferCollection.AttachToken`] is used, the sub
    /// tree rooted at the attached [`fuchsia.sysmem2/BufferCollectionToken`]
    /// goes through the normal procedure of setting constraints or closing
    /// [`fuchsia.sysmem2/Node`](s), and then appearing to allocate buffers from
    /// clients' point of view, despite the possibility that all the buffers
    /// were actually allocated previously. This process is called "logical
    /// allocation". Most instances of "allocation" in docs for other messages
    /// can also be read as "allocation or logical allocation" while remaining
    /// valid, but we just say "allocation" in most places for brevity/clarity
    /// of explanation, with the details of "logical allocation" left for the
    /// docs here on `AttachToken`.
    ///
    /// Failure of an attached `Node` does not propagate to the parent of the
    /// attached `Node`. More generally, failure of a child `Node` is blocked
    /// from reaching its parent `Node` if the child is attached, or if the
    /// child is dispensable and the failure occurred after logical allocation
    /// (see [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`]).
    ///
    /// A participant may in some scenarios choose to initially use a
    /// dispensable token for a given instance of a delegate participant, and
    /// then later if the first instance of that delegate participant fails, a
    /// new second instance of that delegate participant may be given a token
    /// created with `AttachToken`.
    ///
    /// From the point of view of the [`fuchsia.sysmem2/BufferCollectionToken`]
    /// client end, the token acts like any other token. The client can
    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] the token as needed,
    /// and can send the token to a different process/participant. The
    /// `BufferCollectionToken` `Node` should be converted to a
    /// `BufferCollection` `Node` as normal by sending
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or can be closed
    /// without causing subtree failure by sending
    /// [`fuchsia.sysmem2/BufferCollectionToken.Release`]. Assuming the former,
    /// the [`fuchsia.sysmem2/BufferCollection.SetConstraints`] message or
    /// [`fuchsia.sysmem2/BufferCollection.Release`] message should be sent to
    /// the `BufferCollection`.
    ///
    /// Within the subtree, a success result from
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`] means
    /// the subtree participants' constraints were satisfiable using the
    /// already-existing buffer collection, the already-established
    /// [`fuchsia.sysmem2/BufferCollectionInfo`] including image format
    /// constraints, and the already-existing other participants (already added
    /// via successful logical allocation) and their specified buffer counts in
    /// their constraints. A failure result means the new participants'
    /// constraints cannot be satisfied using the existing buffer collection and
    /// its already-added participants. Creating a new collection instead may
    /// allow all participants' constraints to be satisfied, assuming
    /// `SetDispensable` is used in place of `AttachToken`, or a normal token is
    /// used.
    ///
    /// A token created with `AttachToken` performs constraints aggregation with
    /// all constraints currently in effect on the buffer collection, plus the
    /// attached token under consideration plus child tokens under the attached
    /// token which are not themselves an attached token or under such a token.
    /// Further subtrees under this subtree are considered for logical
    /// allocation only after this subtree has completed logical allocation.
    ///
    /// Assignment of existing buffers to participants'
    /// [`fuchsia.sysmem2/BufferCollectionConstraints.min_buffer_count_for_camping`]
    /// etc is first-come first-served, but a child can't logically allocate
    /// before all its parents have sent `SetConstraints`.
    ///
    /// See also [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`], which
    /// in contrast to `AttachToken`, has the created token `Node` + child
    /// `Node`(s) (in the created subtree but not in any subtree under this
    /// subtree) participate in constraints aggregation along with its parent
    /// during the parent's allocation or logical allocation.
    ///
    /// Similar to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], the
    /// newly created token needs to be [`fuchsia.sysmem2/Node.Sync`]ed to
    /// sysmem before the new token can be passed to `BindSharedCollection`. The
    /// `Sync` of the new token can be accomplished with
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after converting the created
    /// `BufferCollectionToken` to a `BufferCollection`. Alternately,
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on the new token also
    /// works. Or using [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`]
    /// works. As usual, a `BufferCollectionToken.Sync` can be started after any
    /// `BufferCollectionToken.Duplicate` messages have been sent via the newly
    /// created token, to also sync those additional tokens to sysmem using a
    /// single round-trip.
    ///
    /// All table fields are currently required.
    ///
    /// + request `rights_attenuation_mask` This allows attenuating the VMO
    ///   rights of the subtree. These values for `rights_attenuation_mask`
    ///   result in no attenuation (note that 0 is not on this list):
    ///   + ZX_RIGHT_SAME_RIGHTS (preferred)
    ///   + 0xFFFFFFFF (this is reasonable when an attenuation mask is computed)
    /// + request `token_request` The server end of the `BufferCollectionToken`
    ///   channel. The client retains the client end.
    pub fn r#attach_token(
        &self,
        mut payload: BufferCollectionAttachTokenRequest,
    ) -> Result<(), fidl::Error> {
        // One-way (fire-and-forget) call: no response is read. `payload` is
        // consumed (its server-end handle moves to the wire).
        self.client.send::<BufferCollectionAttachTokenRequest>(
            &mut payload,
            0x46ac7d0008492982, // method ordinal, generated by fidlgen
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2678
    /// Set up an eventpair to be signalled (`ZX_EVENTPAIR_PEER_CLOSED`) when
    /// buffers have been allocated and only the specified number of buffers (or
    /// fewer) remain in the buffer collection.
    ///
    /// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] allows a
    /// client to wait until an old buffer collection is fully or mostly
    /// deallocated before attempting allocation of a new buffer collection. The
    /// eventpair is only signalled when the buffers of this collection have
    /// been fully deallocated (not just un-referenced by clients, but all the
    /// memory consumed by those buffers has been fully reclaimed/recycled), or
    /// when allocation or logical allocation fails for the tree or subtree
    /// including this [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// The eventpair won't be signalled until allocation or logical allocation
    /// has completed; until then, the collection's current buffer count is
    /// ignored.
    ///
    /// If logical allocation fails for an attached subtree (using
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]), the server end of the
    /// eventpair will close during that failure regardless of the number of
    /// buffers potentially allocated in the overall buffer collection. This is
    /// for logical allocation consistency with normal allocation.
    ///
    /// The lifetime signalled by this event includes asynchronous cleanup of
    /// allocated buffers, and this asynchronous cleanup cannot occur until all
    /// holders of VMO handles to the buffers have closed those VMO handles.
    /// Therefore, clients should take care not to become blocked forever
    /// waiting for `ZX_EVENTPAIR_PEER_CLOSED` to be signalled if any of the
    /// participants using the logical buffer collection (including the waiter
    /// itself) are less trusted, less reliable, or potentially blocked by the
    /// wait itself. Waiting asynchronously is recommended. Setting a deadline
    /// for the client wait may be prudent, depending on details of how the
    /// collection and/or its VMOs are used or shared. Failure to allocate a
    /// new/replacement buffer collection is better than getting stuck forever.
    ///
    /// The sysmem server itself intentionally does not perform any waiting on
    /// already-failed collections' VMOs to finish cleaning up before attempting
    /// a new allocation, and the sysmem server intentionally doesn't retry
    /// allocation if a new allocation fails due to out of memory, even if that
    /// failure is potentially due to continued existence of an old collection's
    /// VMOs. This `AttachLifetimeTracking` message is how an initiator can
    /// mitigate too much overlap of old VMO lifetimes with new VMO lifetimes,
    /// as long as the waiting client is careful to not create a deadlock.
    ///
    /// Continued existence of old collections that are still cleaning up is not
    /// the only reason that a new allocation may fail due to insufficient
    /// memory, even if the new allocation is allocating physically contiguous
    /// buffers. Overall system memory pressure can also be the cause of failure
    /// to allocate a new collection. See also
    /// [`fuchsia.memorypressure/Provider`].
    ///
    /// `AttachLifetimeTracking` is meant to be compatible with other protocols
    /// with a similar `AttachLifetimeTracking` message; duplicates of the same
    /// `eventpair` handle (server end) can be sent via more than one
    /// `AttachLifetimeTracking` message to different protocols, and the
    /// `ZX_EVENTPAIR_PEER_CLOSED` will be signalled for the client end when all
    /// the conditions are met (all holders of duplicates have closed their
    /// server end handle(s)). Also, thanks to how eventpair endpoints work, the
    /// client end can (also) be duplicated without preventing the
    /// `ZX_EVENTPAIR_PEER_CLOSED` signal.
    ///
    /// The server intentionally doesn't "trust" any signals set on the
    /// `server_end`. This mechanism intentionally uses only
    /// `ZX_EVENTPAIR_PEER_CLOSED` set on the client end, which can't be set
    /// "early", and is only set when all handles to the server end eventpair
    /// are closed. No meaning is associated with any of the other signals, and
    /// clients should ignore any other signal bits on either end of the
    /// `eventpair`.
    ///
    /// The `server_end` may lack `ZX_RIGHT_SIGNAL` or `ZX_RIGHT_SIGNAL_PEER`,
    /// but must have `ZX_RIGHT_DUPLICATE` (and must have `ZX_RIGHT_TRANSFER` to
    /// transfer without causing `BufferCollection` channel failure).
    ///
    /// All table fields are currently required.
    ///
    /// + request `server_end` This eventpair handle will be closed by the
    ///   sysmem server when buffers have been allocated initially and the
    ///   number of buffers is then less than or equal to `buffers_remaining`.
    /// + request `buffers_remaining` Wait for all but `buffers_remaining` (or
    ///   fewer) buffers to be fully deallocated. A number greater than zero can
    ///   be useful in situations where a known number of buffers are
    ///   intentionally not closed so that the data can continue to be used,
    ///   such as for keeping the last available video frame displayed in the UI
    ///   even if the video stream was using protected output buffers. It's
    ///   outside the scope of the `BufferCollection` interface (at least for
    ///   now) to determine how many buffers may be held without closing, but
    ///   it'll typically be in the range 0-2.
    pub fn r#attach_lifetime_tracking(
        &self,
        mut payload: BufferCollectionAttachLifetimeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        // One-way (fire-and-forget) call: no response is read. `payload` is
        // consumed (its eventpair server end moves to the wire).
        self.client.send::<BufferCollectionAttachLifetimeTrackingRequest>(
            &mut payload,
            0x3ecb510113116dcf, // method ordinal, generated by fidlgen
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2776}
2777
#[cfg(target_os = "fuchsia")]
impl From<BufferCollectionSynchronousProxy> for zx::NullableHandle {
    /// Consumes the proxy and yields the underlying channel as a handle.
    fn from(value: BufferCollectionSynchronousProxy) -> Self {
        let channel = value.into_channel();
        Self::from(channel)
    }
}
2784
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for BufferCollectionSynchronousProxy {
    /// Wraps a raw channel in a synchronous proxy.
    fn from(value: fidl::Channel) -> Self {
        BufferCollectionSynchronousProxy::new(value)
    }
}
2791
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for BufferCollectionSynchronousProxy {
    type Protocol = BufferCollectionMarker;

    /// Builds a synchronous proxy from a typed client end.
    fn from_client(value: fidl::endpoints::ClientEnd<BufferCollectionMarker>) -> Self {
        // Strip the protocol typing down to the raw channel first.
        let channel = value.into_channel();
        Self::new(channel)
    }
}
2800
/// Asynchronous client proxy for the `fuchsia.sysmem2/BufferCollection`
/// protocol; constructed from an async channel via [`BufferCollectionProxy::new`].
#[derive(Debug, Clone)]
pub struct BufferCollectionProxy {
    // Underlying async FIDL client that owns the channel and dispatches
    // requests, responses, and events.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
2805
2806impl fidl::endpoints::Proxy for BufferCollectionProxy {
2807 type Protocol = BufferCollectionMarker;
2808
2809 fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
2810 Self::new(inner)
2811 }
2812
2813 fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
2814 self.client.into_channel().map_err(|client| Self { client })
2815 }
2816
2817 fn as_channel(&self) -> &::fidl::AsyncChannel {
2818 self.client.as_channel()
2819 }
2820}
2821
2822impl BufferCollectionProxy {
2823 /// Create a new Proxy for fuchsia.sysmem2/BufferCollection.
2824 pub fn new(channel: ::fidl::AsyncChannel) -> Self {
2825 let protocol_name = <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
2826 Self { client: fidl::client::Client::new(channel, protocol_name) }
2827 }
2828
2829 /// Get a Stream of events from the remote end of the protocol.
2830 ///
2831 /// # Panics
2832 ///
2833 /// Panics if the event stream was already taken.
2834 pub fn take_event_stream(&self) -> BufferCollectionEventStream {
2835 BufferCollectionEventStream { event_receiver: self.client.take_event_receiver() }
2836 }
2837
2838 /// Ensure that previous messages have been received server side. This is
2839 /// particularly useful after previous messages that created new tokens,
2840 /// because a token must be known to the sysmem server before sending the
2841 /// token to another participant.
2842 ///
2843 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
2844 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
2845 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
2846 /// to mitigate the possibility of a hostile/fake
2847 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
2848 /// Another way is to pass the token to
2849 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
2850 /// the token as part of exchanging it for a
2851 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
2852 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
2853 /// of stalling.
2854 ///
2855 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
2856 /// and then starting and completing a `Sync`, it's then safe to send the
2857 /// `BufferCollectionToken` client ends to other participants knowing the
2858 /// server will recognize the tokens when they're sent by the other
2859 /// participants to sysmem in a
2860 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
2861 /// efficient way to create tokens while avoiding unnecessary round trips.
2862 ///
2863 /// Other options include waiting for each
2864 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
2865 /// individually (using separate call to `Sync` after each), or calling
2866 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
2867 /// converted to a `BufferCollection` via
2868 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
2869 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
2870 /// the sync step and can create multiple tokens at once.
2871 pub fn r#sync(
2872 &self,
2873 ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
2874 BufferCollectionProxyInterface::r#sync(self)
2875 }
2876
2877 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
2878 ///
2879 /// Normally a participant will convert a `BufferCollectionToken` into a
2880 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
2881 /// `Release` via the token (and then close the channel immediately or
2882 /// shortly later in response to server closing the server end), which
2883 /// avoids causing buffer collection failure. Without a prior `Release`,
2884 /// closing the `BufferCollectionToken` client end will cause buffer
2885 /// collection failure.
2886 ///
2887 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
2888 ///
2889 /// By default the server handles unexpected closure of a
2890 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
2891 /// first) by failing the buffer collection. Partly this is to expedite
2892 /// closing VMO handles to reclaim memory when any participant fails. If a
2893 /// participant would like to cleanly close a `BufferCollection` without
2894 /// causing buffer collection failure, the participant can send `Release`
2895 /// before closing the `BufferCollection` client end. The `Release` can
2896 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
2897 /// buffer collection won't require constraints from this node in order to
2898 /// allocate. If after `SetConstraints`, the constraints are retained and
2899 /// aggregated, despite the lack of `BufferCollection` connection at the
2900 /// time of constraints aggregation.
2901 ///
2902 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
2903 ///
2904 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
2905 /// end (without `Release` first) will trigger failure of the buffer
2906 /// collection. To close a `BufferCollectionTokenGroup` channel without
2907 /// failing the buffer collection, ensure that AllChildrenPresent() has been
2908 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
2909 /// client end.
2910 ///
2911 /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
2913 /// buffer collection will fail (triggered by reception of `Release` without
2914 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
2915 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
2916 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
2917 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
2918 /// close requires `AllChildrenPresent` (if not already sent), then
2919 /// `Release`, then close client end.
2920 ///
2921 /// If `Release` occurs after `AllChildrenPresent`, the children and all
2922 /// their constraints remain intact (just as they would if the
2923 /// `BufferCollectionTokenGroup` channel had remained open), and the client
2924 /// end close doesn't trigger buffer collection failure.
2925 ///
2926 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
2927 ///
2928 /// For brevity, the per-channel-protocol paragraphs above ignore the
2929 /// separate failure domain created by
2930 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
2931 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
2932 /// unexpectedly closes (without `Release` first) and that client end is
2933 /// under a failure domain, instead of failing the whole buffer collection,
2934 /// the failure domain is failed, but the buffer collection itself is
2935 /// isolated from failure of the failure domain. Such failure domains can be
2936 /// nested, in which case only the inner-most failure domain in which the
2937 /// `Node` resides fails.
2938 pub fn r#release(&self) -> Result<(), fidl::Error> {
2939 BufferCollectionProxyInterface::r#release(self)
2940 }
2941
2942 /// Set a name for VMOs in this buffer collection.
2943 ///
2944 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
2945 /// will be truncated to fit. The name of the vmo will be suffixed with the
2946 /// buffer index within the collection (if the suffix fits within
2947 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
2948 /// listed in the inspect data.
2949 ///
2950 /// The name only affects VMOs allocated after the name is set; this call
2951 /// does not rename existing VMOs. If multiple clients set different names
2952 /// then the larger priority value will win. Setting a new name with the
2953 /// same priority as a prior name doesn't change the name.
2954 ///
2955 /// All table fields are currently required.
2956 ///
2957 /// + request `priority` The name is only set if this is the first `SetName`
2958 /// or if `priority` is greater than any previous `priority` value in
2959 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
2960 /// + request `name` The name for VMOs created under this buffer collection.
2961 pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
2962 BufferCollectionProxyInterface::r#set_name(self, payload)
2963 }
2964
2965 /// Set information about the current client that can be used by sysmem to
2966 /// help diagnose leaking memory and allocation stalls waiting for a
2967 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
2968 ///
2969 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
2970 /// `Node`(s) derived from this `Node`, unless overriden by
2971 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
2972 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
2973 ///
2974 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
2975 /// `Allocator` is the most efficient way to ensure that all
2976 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
2977 /// set, and is also more efficient than separately sending the same debug
2978 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
2979 /// created [`fuchsia.sysmem2/Node`].
2980 ///
2981 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
2982 /// indicate which client is closing their channel first, leading to subtree
2983 /// failure (which can be normal if the purpose of the subtree is over, but
2984 /// if happening earlier than expected, the client-channel-specific name can
2985 /// help diagnose where the failure is first coming from, from sysmem's
2986 /// point of view).
2987 ///
2988 /// All table fields are currently required.
2989 ///
2990 /// + request `name` This can be an arbitrary string, but the current
2991 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
2992 /// + request `id` This can be an arbitrary id, but the current process ID
2993 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
2994 pub fn r#set_debug_client_info(
2995 &self,
2996 mut payload: &NodeSetDebugClientInfoRequest,
2997 ) -> Result<(), fidl::Error> {
2998 BufferCollectionProxyInterface::r#set_debug_client_info(self, payload)
2999 }
3000
3001 /// Sysmem logs a warning if sysmem hasn't seen
3002 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
3003 /// within 5 seconds after creation of a new collection.
3004 ///
3005 /// Clients can call this method to change when the log is printed. If
3006 /// multiple client set the deadline, it's unspecified which deadline will
3007 /// take effect.
3008 ///
3009 /// In most cases the default works well.
3010 ///
3011 /// All table fields are currently required.
3012 ///
3013 /// + request `deadline` The time at which sysmem will start trying to log
3014 /// the warning, unless all constraints are with sysmem by then.
3015 pub fn r#set_debug_timeout_log_deadline(
3016 &self,
3017 mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
3018 ) -> Result<(), fidl::Error> {
3019 BufferCollectionProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
3020 }
3021
3022 /// This enables verbose logging for the buffer collection.
3023 ///
3024 /// Verbose logging includes constraints set via
3025 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
3026 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
3027 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
3028 /// the tree of `Node`(s).
3029 ///
3030 /// Normally sysmem prints only a single line complaint when aggregation
3031 /// fails, with just the specific detailed reason that aggregation failed,
3032 /// with little surrounding context. While this is often enough to diagnose
3033 /// a problem if only a small change was made and everything was working
3034 /// before the small change, it's often not particularly helpful for getting
3035 /// a new buffer collection to work for the first time. Especially with
3036 /// more complex trees of nodes, involving things like
3037 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
3038 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
3039 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
3040 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
3041 /// looks like and why it's failing a logical allocation, or why a tree or
3042 /// subtree is failing sooner than expected.
3043 ///
3044 /// The intent of the extra logging is to be acceptable from a performance
3045 /// point of view, under the assumption that verbose logging is only enabled
3046 /// on a low number of buffer collections. If we're not tracking down a bug,
3047 /// we shouldn't send this message.
3048 pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
3049 BufferCollectionProxyInterface::r#set_verbose_logging(self)
3050 }
3051
3052 /// This gets a handle that can be used as a parameter to
3053 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
3054 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
3055 /// client obtained this handle from this `Node`.
3056 ///
3057 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
3058 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
3059 /// despite the two calls typically being on different channels.
3060 ///
3061 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
3062 ///
3063 /// All table fields are currently required.
3064 ///
3065 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
3066 /// different `Node` channel, to prove that the client obtained the handle
3067 /// from this `Node`.
3068 pub fn r#get_node_ref(
3069 &self,
3070 ) -> fidl::client::QueryResponseFut<
3071 NodeGetNodeRefResponse,
3072 fidl::encoding::DefaultFuchsiaResourceDialect,
3073 > {
3074 BufferCollectionProxyInterface::r#get_node_ref(self)
3075 }
3076
3077 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
3078 /// rooted at a different child token of a common parent
3079 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
3080 /// passed-in `node_ref`.
3081 ///
3082 /// This call is for assisting with admission control de-duplication, and
3083 /// with debugging.
3084 ///
3085 /// The `node_ref` must be obtained using
3086 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
3087 ///
3088 /// The `node_ref` can be a duplicated handle; it's not necessary to call
3089 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
3090 ///
3091 /// If a calling token may not actually be a valid token at all due to a
3092 /// potentially hostile/untrusted provider of the token, call
3093 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
3094 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
3095 /// never responds due to a calling token not being a real token (not really
3096 /// talking to sysmem). Another option is to call
3097 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
3098 /// which also validates the token along with converting it to a
3099 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
3100 ///
3101 /// All table fields are currently required.
3102 ///
3103 /// - response `is_alternate`
3104 /// - true: The first parent node in common between the calling node and
3105 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
3106 /// that the calling `Node` and the `node_ref` `Node` will not have both
3107 /// their constraints apply - rather sysmem will choose one or the other
3108 /// of the constraints - never both. This is because only one child of
3109 /// a `BufferCollectionTokenGroup` is selected during logical
3110 /// allocation, with only that one child's subtree contributing to
3111 /// constraints aggregation.
3112 /// - false: The first parent node in common between the calling `Node`
3113 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
3114 /// Currently, this means the first parent node in common is a
3115 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
3116 /// `Release`ed). This means that the calling `Node` and the `node_ref`
3117 /// `Node` may have both their constraints apply during constraints
3118 /// aggregation of the logical allocation, if both `Node`(s) are
3119 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
3120 /// this case, there is no `BufferCollectionTokenGroup` that will
3121 /// directly prevent the two `Node`(s) from both being selected and
3122 /// their constraints both aggregated, but even when false, one or both
3123 /// `Node`(s) may still be eliminated from consideration if one or both
3124 /// `Node`(s) has a direct or indirect parent
3125 /// `BufferCollectionTokenGroup` which selects a child subtree other
3126 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
3127 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
3128 /// associated with the same buffer collection as the calling `Node`.
3129 /// Another reason for this error is if the `node_ref` is an
3130 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
3131 /// a real `node_ref` obtained from `GetNodeRef`.
3132 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    /// `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
3134 /// the needed rights expected on a real `node_ref`.
3135 /// * No other failing status codes are returned by this call. However,
3136 /// sysmem may add additional codes in future, so the client should have
3137 /// sensible default handling for any failing status code.
3138 pub fn r#is_alternate_for(
3139 &self,
3140 mut payload: NodeIsAlternateForRequest,
3141 ) -> fidl::client::QueryResponseFut<
3142 NodeIsAlternateForResult,
3143 fidl::encoding::DefaultFuchsiaResourceDialect,
3144 > {
3145 BufferCollectionProxyInterface::r#is_alternate_for(self, payload)
3146 }
3147
3148 /// Get the buffer collection ID. This ID is also available from
3149 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
3150 /// within the collection).
3151 ///
3152 /// This call is mainly useful in situations where we can't convey a
3153 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
3154 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
3155 /// handle, which can be joined back up with a `BufferCollection` client end
3156 /// that was created via a different path. Prefer to convey a
3157 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
3158 ///
3159 /// Trusting a `buffer_collection_id` value from a source other than sysmem
3160 /// is analogous to trusting a koid value from a source other than zircon.
3161 /// Both should be avoided unless really necessary, and both require
3162 /// caution. In some situations it may be reasonable to refer to a
3163 /// pre-established `BufferCollection` by `buffer_collection_id` via a
3164 /// protocol for efficiency reasons, but an incoming value purporting to be
3165 /// a `buffer_collection_id` is not sufficient alone to justify granting the
3166 /// sender of the `buffer_collection_id` any capability. The sender must
3167 /// first prove to a receiver that the sender has/had a VMO or has/had a
3168 /// `BufferCollectionToken` to the same collection by sending a handle that
3169 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
3170 /// `buffer_collection_id` value. The receiver should take care to avoid
3171 /// assuming that a sender had a `BufferCollectionToken` in cases where the
3172 /// sender has only proven that the sender had a VMO.
3173 ///
3174 /// - response `buffer_collection_id` This ID is unique per buffer
3175 /// collection per boot. Each buffer is uniquely identified by the
3176 /// `buffer_collection_id` and `buffer_index` together.
3177 pub fn r#get_buffer_collection_id(
3178 &self,
3179 ) -> fidl::client::QueryResponseFut<
3180 NodeGetBufferCollectionIdResponse,
3181 fidl::encoding::DefaultFuchsiaResourceDialect,
3182 > {
3183 BufferCollectionProxyInterface::r#get_buffer_collection_id(self)
3184 }
3185
3186 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
3187 /// created after this message to weak, which means that a client's `Node`
3188 /// client end (or a child created after this message) is not alone
3189 /// sufficient to keep allocated VMOs alive.
3190 ///
3191 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
3192 /// `close_weak_asap`.
3193 ///
3194 /// This message is only permitted before the `Node` becomes ready for
3195 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
3196 /// * `BufferCollectionToken`: any time
3197 /// * `BufferCollection`: before `SetConstraints`
3198 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
3199 ///
3200 /// Currently, no conversion from strong `Node` to weak `Node` after ready
3201 /// for allocation is provided, but a client can simulate that by creating
3202 /// an additional `Node` before allocation and setting that additional
3203 /// `Node` to weak, and then potentially at some point later sending
3204 /// `Release` and closing the client end of the client's strong `Node`, but
3205 /// keeping the client's weak `Node`.
3206 ///
3207 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
3208 /// collection failure (all `Node` client end(s) will see
3209 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
3210 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
3211 /// this situation until all `Node`(s) are ready for allocation. For initial
3212 /// allocation to succeed, at least one strong `Node` is required to exist
3213 /// at allocation time, but after that client receives VMO handles, that
3214 /// client can `BufferCollection.Release` and close the client end without
3215 /// causing this type of failure.
3216 ///
3217 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
3218 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
3219 /// separately as appropriate.
3220 pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
3221 BufferCollectionProxyInterface::r#set_weak(self)
3222 }
3223
3224 /// This indicates to sysmem that the client is prepared to pay attention to
3225 /// `close_weak_asap`.
3226 ///
3227 /// If sent, this message must be before
3228 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
3229 ///
3230 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
3231 /// send this message before `WaitForAllBuffersAllocated`, or a parent
3232 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
3233 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
3234 /// trigger buffer collection failure.
3235 ///
3236 /// This message is necessary because weak sysmem VMOs have not always been
3237 /// a thing, so older clients are not aware of the need to pay attention to
3238 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
3239 /// sysmem weak VMO handles asap. By having this message and requiring
3240 /// participants to indicate their acceptance of this aspect of the overall
3241 /// protocol, we avoid situations where an older client is delivered a weak
3242 /// VMO without any way for sysmem to get that VMO to close quickly later
3243 /// (and on a per-buffer basis).
3244 ///
3245 /// A participant that doesn't handle `close_weak_asap` and also doesn't
3246 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
3247 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
3248 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
3249 /// same participant has a child/delegate which does retrieve VMOs, that
3250 /// child/delegate will need to send `SetWeakOk` before
3251 /// `WaitForAllBuffersAllocated`.
3252 ///
3253 /// + request `for_child_nodes_also` If present and true, this means direct
3254 /// child nodes of this node created after this message plus all
3255 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
3256 /// those nodes. Any child node of this node that was created before this
3257 /// message is not included. This setting is "sticky" in the sense that a
3258 /// subsequent `SetWeakOk` without this bool set to true does not reset
3259 /// the server-side bool. If this creates a problem for a participant, a
3260 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
3261 /// tokens instead, as appropriate. A participant should only set
3262 /// `for_child_nodes_also` true if the participant can really promise to
3263 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
3264 /// weak VMO handles held by participants holding the corresponding child
3265 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
3266 /// which are using sysmem(1) can be weak, despite the clients of those
3267 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
3268 /// direct way to find out about `close_weak_asap`. This only applies to
3269 /// descendents of this `Node` which are using sysmem(1), not to this
3270 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
3271 /// token, which will fail allocation unless an ancestor of this `Node`
3272 /// specified `for_child_nodes_also` true.
3273 pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
3274 BufferCollectionProxyInterface::r#set_weak_ok(self, payload)
3275 }
3276
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
3279 /// reservation by a different `Node` via
3280 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
3281 ///
3282 /// The `Node` buffer counts may not be released until the entire tree of
3283 /// `Node`(s) is closed or failed, because
3284 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
3285 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
3286 /// `Node` buffer counts remain reserved until the orphaned node is later
3287 /// cleaned up.
3288 ///
3289 /// If the `Node` exceeds a fairly large number of attached eventpair server
3290 /// ends, a log message will indicate this and the `Node` (and the
3291 /// appropriate) sub-tree will fail.
3292 ///
3293 /// The `server_end` will remain open when
3294 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
3295 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
3296 /// [`fuchsia.sysmem2/BufferCollection`].
3297 ///
3298 /// This message can also be used with a
3299 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
3300 pub fn r#attach_node_tracking(
3301 &self,
3302 mut payload: NodeAttachNodeTrackingRequest,
3303 ) -> Result<(), fidl::Error> {
3304 BufferCollectionProxyInterface::r#attach_node_tracking(self, payload)
3305 }
3306
3307 /// Provide [`fuchsia.sysmem2/BufferCollectionConstraints`] to the buffer
3308 /// collection.
3309 ///
3310 /// A participant may only call
3311 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] up to once per
3312 /// [`fuchsia.sysmem2/BufferCollection`].
3313 ///
3314 /// For buffer allocation to be attempted, all holders of a
3315 /// `BufferCollection` client end need to call `SetConstraints` before
3316 /// sysmem will attempt to allocate buffers.
3317 ///
3318 /// + request `constraints` These are the constraints on the buffer
3319 /// collection imposed by the sending client/participant. The
3320 /// `constraints` field is not required to be set. If not set, the client
3321 /// is not setting any actual constraints, but is indicating that the
3322 /// client has no constraints to set. A client that doesn't set the
3323 /// `constraints` field won't receive any VMO handles, but can still find
3324 /// out how many buffers were allocated and can still refer to buffers by
3325 /// their `buffer_index`.
3326 pub fn r#set_constraints(
3327 &self,
3328 mut payload: BufferCollectionSetConstraintsRequest,
3329 ) -> Result<(), fidl::Error> {
3330 BufferCollectionProxyInterface::r#set_constraints(self, payload)
3331 }
3332
3333 /// Wait until all buffers are allocated.
3334 ///
3335 /// This FIDL call completes when buffers have been allocated, or completes
3336 /// with some failure detail if allocation has been attempted but failed.
3337 ///
3338 /// The following must occur before buffers will be allocated:
3339 /// * All [`fuchsia.sysmem2/BufferCollectionToken`](s) of the buffer
3340 /// collection must be turned in via `BindSharedCollection` to get a
3341 /// [`fuchsia.sysmem2/BufferCollection`] (for brevity, this is assuming
3342 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] isn't being used),
3343 /// or have had [`fuchsia.sysmem2/BufferCollectionToken.Release`] sent
3344 /// to them.
3345 /// * All [`fuchsia.sysmem2/BufferCollection`](s) of the buffer collection
3346 /// must have had [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
3347 /// sent to them, or had [`fuchsia.sysmem2/BufferCollection.Release`]
3348 /// sent to them.
3349 ///
3350 /// - result `buffer_collection_info` The VMO handles and other related
3351 /// info.
3352 /// * error `[fuchsia.sysmem2/Error.NO_MEMORY]` The request is valid but
3353 /// cannot be fulfilled due to resource exhaustion.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The request is
    ///   malformed.
    /// * error `[fuchsia.sysmem2/Error.CONSTRAINTS_INTERSECTION_EMPTY]` The
3357 /// request is valid but cannot be satisfied, perhaps due to hardware
3358 /// limitations. This can happen if participants have incompatible
3359 /// constraints (empty intersection, roughly speaking). See the log for
3360 /// more info. In cases where a participant could potentially be treated
3361 /// as optional, see [`BufferCollectionTokenGroup`]. When using
3362 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], this will be the
3363 /// error code if there aren't enough buffers in the pre-existing
3364 /// collection to satisfy the constraints set on the attached token and
3365 /// any sub-tree of tokens derived from the attached token.
3366 pub fn r#wait_for_all_buffers_allocated(
3367 &self,
3368 ) -> fidl::client::QueryResponseFut<
3369 BufferCollectionWaitForAllBuffersAllocatedResult,
3370 fidl::encoding::DefaultFuchsiaResourceDialect,
3371 > {
3372 BufferCollectionProxyInterface::r#wait_for_all_buffers_allocated(self)
3373 }
3374
    /// Checks whether all the buffers have been allocated, in a polling
    /// fashion.
    ///
    /// * If the buffer collection has been allocated, returns success.
    /// * If the buffer collection failed allocation, returns the same
    ///   [`fuchsia.sysmem2/Error`] as
    ///   [`fuchsia.sysmem2/BufferCollection/WaitForAllBuffersAllocated`] would
    ///   return.
    /// * error [`fuchsia.sysmem2/Error.PENDING`] The buffer collection hasn't
    ///   attempted allocation yet. This means that WaitForAllBuffersAllocated
    ///   would not respond quickly.
    pub fn r#check_all_buffers_allocated(
        &self,
    ) -> fidl::client::QueryResponseFut<
        BufferCollectionCheckAllBuffersAllocatedResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Two-way call: delegates to the `BufferCollectionProxyInterface`
        // impl, which returns a future for the decoded response.
        BufferCollectionProxyInterface::r#check_all_buffers_allocated(self)
    }
3394
    /// Create a new token to add a new participant to an existing logical
    /// buffer collection, if the existing collection's buffer counts,
    /// constraints, and participants allow.
    ///
    /// This can be useful in replacing a failed participant, and/or in
    /// adding/re-adding a participant after buffers have already been
    /// allocated.
    ///
    /// When [`fuchsia.sysmem2/BufferCollection.AttachToken`] is used, the sub
    /// tree rooted at the attached [`fuchsia.sysmem2/BufferCollectionToken`]
    /// goes through the normal procedure of setting constraints or closing
    /// [`fuchsia.sysmem2/Node`](s), and then appearing to allocate buffers from
    /// clients' point of view, despite the possibility that all the buffers
    /// were actually allocated previously. This process is called "logical
    /// allocation". Most instances of "allocation" in docs for other messages
    /// can also be read as "allocation or logical allocation" while remaining
    /// valid, but we just say "allocation" in most places for brevity/clarity
    /// of explanation, with the details of "logical allocation" left for the
    /// docs here on `AttachToken`.
    ///
    /// Failure of an attached `Node` does not propagate to the parent of the
    /// attached `Node`. More generally, failure of a child `Node` is blocked
    /// from reaching its parent `Node` if the child is attached, or if the
    /// child is dispensable and the failure occurred after logical allocation
    /// (see [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`]).
    ///
    /// A participant may in some scenarios choose to initially use a
    /// dispensable token for a given instance of a delegate participant, and
    /// then later if the first instance of that delegate participant fails, a
    /// new second instance of that delegate participant may be given a token
    /// created with `AttachToken`.
    ///
    /// From the point of view of the [`fuchsia.sysmem2/BufferCollectionToken`]
    /// client end, the token acts like any other token. The client can
    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] the token as needed,
    /// and can send the token to a different process/participant. The
    /// `BufferCollectionToken` `Node` should be converted to a
    /// `BufferCollection` `Node` as normal by sending
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or can be closed
    /// without causing subtree failure by sending
    /// [`fuchsia.sysmem2/BufferCollectionToken.Release`]. Assuming the former,
    /// the [`fuchsia.sysmem2/BufferCollection.SetConstraints`] message or
    /// [`fuchsia.sysmem2/BufferCollection.Release`] message should be sent to
    /// the `BufferCollection`.
    ///
    /// Within the subtree, a success result from
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`] means
    /// the subtree participants' constraints were satisfiable using the
    /// already-existing buffer collection, the already-established
    /// [`fuchsia.sysmem2/BufferCollectionInfo`] including image format
    /// constraints, and the already-existing other participants (already added
    /// via successful logical allocation) and their specified buffer counts in
    /// their constraints. A failure result means the new participants'
    /// constraints cannot be satisfied using the existing buffer collection and
    /// its already-added participants. Creating a new collection instead may
    /// allow all participants' constraints to be satisfied, assuming
    /// `SetDispensable` is used in place of `AttachToken`, or a normal token is
    /// used.
    ///
    /// A token created with `AttachToken` performs constraints aggregation with
    /// all constraints currently in effect on the buffer collection, plus the
    /// attached token under consideration plus child tokens under the attached
    /// token which are not themselves an attached token or under such a token.
    /// Further subtrees under this subtree are considered for logical
    /// allocation only after this subtree has completed logical allocation.
    ///
    /// Assignment of existing buffers to participants'
    /// [`fuchsia.sysmem2/BufferCollectionConstraints.min_buffer_count_for_camping`]
    /// etc is first-come first-served, but a child can't logically allocate
    /// before all its parents have sent `SetConstraints`.
    ///
    /// See also [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`], which
    /// in contrast to `AttachToken`, has the created token `Node` + child
    /// `Node`(s) (in the created subtree but not in any subtree under this
    /// subtree) participate in constraints aggregation along with its parent
    /// during the parent's allocation or logical allocation.
    ///
    /// Similar to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], the
    /// newly created token needs to be [`fuchsia.sysmem2/Node.Sync`]ed to
    /// sysmem before the new token can be passed to `BindSharedCollection`. The
    /// `Sync` of the new token can be accomplished with
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after converting the created
    /// `BufferCollectionToken` to a `BufferCollection`. Alternately,
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on the new token also
    /// works. Or using [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`]
    /// works. As usual, a `BufferCollectionToken.Sync` can be started after any
    /// `BufferCollectionToken.Duplicate` messages have been sent via the newly
    /// created token, to also sync those additional tokens to sysmem using a
    /// single round-trip.
    ///
    /// All table fields are currently required.
    ///
    /// + request `rights_attenuation_mask` This allows attenuating the VMO
    ///   rights of the subtree. These values for `rights_attenuation_mask`
    ///   result in no attenuation (note that 0 is not on this list):
    ///   + ZX_RIGHT_SAME_RIGHTS (preferred)
    ///   + 0xFFFFFFFF (this is reasonable when an attenuation mask is computed)
    /// + request `token_request` The server end of the `BufferCollectionToken`
    ///   channel. The client retains the client end.
    pub fn r#attach_token(
        &self,
        mut payload: BufferCollectionAttachTokenRequest,
    ) -> Result<(), fidl::Error> {
        // One-way call: delegates to the `BufferCollectionProxyInterface`
        // impl, which encodes and sends the request over the channel.
        BufferCollectionProxyInterface::r#attach_token(self, payload)
    }
3500
    /// Set up an eventpair to be signalled (`ZX_EVENTPAIR_PEER_CLOSED`) when
    /// buffers have been allocated and only the specified number of buffers (or
    /// fewer) remain in the buffer collection.
    ///
    /// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] allows a
    /// client to wait until an old buffer collection is fully or mostly
    /// deallocated before attempting allocation of a new buffer collection. The
    /// eventpair is only signalled when the buffers of this collection have
    /// been fully deallocated (not just un-referenced by clients, but all the
    /// memory consumed by those buffers has been fully reclaimed/recycled), or
    /// when allocation or logical allocation fails for the tree or subtree
    /// including this [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// The eventpair won't be signalled until allocation or logical allocation
    /// has completed; until then, the collection's current buffer count is
    /// ignored.
    ///
    /// If logical allocation fails for an attached subtree (using
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]), the server end of the
    /// eventpair will close during that failure regardless of the number of
    /// buffers potentially allocated in the overall buffer collection. This is
    /// for logical allocation consistency with normal allocation.
    ///
    /// The lifetime signalled by this event includes asynchronous cleanup of
    /// allocated buffers, and this asynchronous cleanup cannot occur until all
    /// holders of VMO handles to the buffers have closed those VMO handles.
    /// Therefore, clients should take care not to become blocked forever
    /// waiting for `ZX_EVENTPAIR_PEER_CLOSED` to be signalled if any of the
    /// participants using the logical buffer collection (including the waiter
    /// itself) are less trusted, less reliable, or potentially blocked by the
    /// wait itself. Waiting asynchronously is recommended. Setting a deadline
    /// for the client wait may be prudent, depending on details of how the
    /// collection and/or its VMOs are used or shared. Failure to allocate a
    /// new/replacement buffer collection is better than getting stuck forever.
    ///
    /// The sysmem server itself intentionally does not perform any waiting on
    /// already-failed collections' VMOs to finish cleaning up before attempting
    /// a new allocation, and the sysmem server intentionally doesn't retry
    /// allocation if a new allocation fails due to out of memory, even if that
    /// failure is potentially due to continued existence of an old collection's
    /// VMOs. This `AttachLifetimeTracking` message is how an initiator can
    /// mitigate too much overlap of old VMO lifetimes with new VMO lifetimes,
    /// as long as the waiting client is careful to not create a deadlock.
    ///
    /// Continued existence of old collections that are still cleaning up is not
    /// the only reason that a new allocation may fail due to insufficient
    /// memory, even if the new allocation is allocating physically contiguous
    /// buffers. Overall system memory pressure can also be the cause of failure
    /// to allocate a new collection. See also
    /// [`fuchsia.memorypressure/Provider`].
    ///
    /// `AttachLifetimeTracking` is meant to be compatible with other protocols
    /// with a similar `AttachLifetimeTracking` message; duplicates of the same
    /// `eventpair` handle (server end) can be sent via more than one
    /// `AttachLifetimeTracking` message to different protocols, and the
    /// `ZX_EVENTPAIR_PEER_CLOSED` will be signalled for the client end when all
    /// the conditions are met (all holders of duplicates have closed their
    /// server end handle(s)). Also, thanks to how eventpair endpoints work, the
    /// client end can (also) be duplicated without preventing the
    /// `ZX_EVENTPAIR_PEER_CLOSED` signal.
    ///
    /// The server intentionally doesn't "trust" any signals set on the
    /// `server_end`. This mechanism intentionally uses only
    /// `ZX_EVENTPAIR_PEER_CLOSED` set on the client end, which can't be set
    /// "early", and is only set when all handles to the server end eventpair
    /// are closed. No meaning is associated with any of the other signals, and
    /// clients should ignore any other signal bits on either end of the
    /// `eventpair`.
    ///
    /// The `server_end` may lack `ZX_RIGHT_SIGNAL` or `ZX_RIGHT_SIGNAL_PEER`,
    /// but must have `ZX_RIGHT_DUPLICATE` (and must have `ZX_RIGHT_TRANSFER` to
    /// transfer without causing `BufferCollection` channel failure).
    ///
    /// All table fields are currently required.
    ///
    /// + request `server_end` This eventpair handle will be closed by the
    ///   sysmem server when buffers have been allocated initially and the
    ///   number of buffers is then less than or equal to `buffers_remaining`.
    /// + request `buffers_remaining` Wait for all but `buffers_remaining` (or
    ///   fewer) buffers to be fully deallocated. A number greater than zero can
    ///   be useful in situations where a known number of buffers are
    ///   intentionally not closed so that the data can continue to be used,
    ///   such as for keeping the last available video frame displayed in the UI
    ///   even if the video stream was using protected output buffers. It's
    ///   outside the scope of the `BufferCollection` interface (at least for
    ///   now) to determine how many buffers may be held without closing, but
    ///   it'll typically be in the range 0-2.
    pub fn r#attach_lifetime_tracking(
        &self,
        mut payload: BufferCollectionAttachLifetimeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        // One-way call: delegates to the `BufferCollectionProxyInterface`
        // impl, which encodes and sends the request over the channel.
        BufferCollectionProxyInterface::r#attach_lifetime_tracking(self, payload)
    }
3594}
3595
// Client-side wire implementation of the `BufferCollection` protocol: each
// method sends a message tagged with that method's ordinal and FLEXIBLE
// dynamic flags; two-way methods additionally install a `_decode` callback
// that validates and decodes the flexible response body.
impl BufferCollectionProxyInterface for BufferCollectionProxy {
    type SyncResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    fn r#sync(&self) -> Self::SyncResponseFut {
        // Decodes the empty flexible response body for `Sync`.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x11ac2555cf575b54,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way method: sends an empty payload; no response is decoded.
    fn r#release(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
        // Decodes the flexible `GetNodeRef` response body.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b3d0e51614df053,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("get_node_ref")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut {
        // Decodes the flexible result (success body or domain `Error`).
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x3a58e00157e0825,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("is_alternate_for")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
        // Decodes the flexible `GetBufferCollectionId` response body.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x77d19a494b78ba8c,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("get_buffer_collection_id")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            NodeGetBufferCollectionIdResponse,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    fn r#set_weak(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_constraints(
        &self,
        mut payload: BufferCollectionSetConstraintsRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionSetConstraintsRequest>(
            &mut payload,
            0x1fde0f19d650197b,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type WaitForAllBuffersAllocatedResponseFut = fidl::client::QueryResponseFut<
        BufferCollectionWaitForAllBuffersAllocatedResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#wait_for_all_buffers_allocated(&self) -> Self::WaitForAllBuffersAllocatedResponseFut {
        // Decodes the flexible result (allocation info or domain `Error`).
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<BufferCollectionWaitForAllBuffersAllocatedResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<
                    BufferCollectionWaitForAllBuffersAllocatedResponse,
                    Error,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x62300344b61404e,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("wait_for_all_buffers_allocated")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            BufferCollectionWaitForAllBuffersAllocatedResult,
        >(
            (),
            0x62300344b61404e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type CheckAllBuffersAllocatedResponseFut = fidl::client::QueryResponseFut<
        BufferCollectionCheckAllBuffersAllocatedResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#check_all_buffers_allocated(&self) -> Self::CheckAllBuffersAllocatedResponseFut {
        // Decodes the flexible result (empty success or domain `Error`).
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<BufferCollectionCheckAllBuffersAllocatedResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x35a5fe77ce939c10,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("check_all_buffers_allocated")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            BufferCollectionCheckAllBuffersAllocatedResult,
        >(
            (),
            0x35a5fe77ce939c10,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    fn r#attach_token(
        &self,
        mut payload: BufferCollectionAttachTokenRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionAttachTokenRequest>(
            &mut payload,
            0x46ac7d0008492982,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#attach_lifetime_tracking(
        &self,
        mut payload: BufferCollectionAttachLifetimeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionAttachLifetimeTrackingRequest>(
            &mut payload,
            0x3ecb510113116dcf,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
3860
/// A stream of events arriving on a `BufferCollection` client channel.
pub struct BufferCollectionEventStream {
    // Yields raw message buffers from the channel; each buffer is decoded
    // into a `BufferCollectionEvent` in this stream's `poll_next`.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
3864
// The stream holds no self-referential data, so it is safe to unpin.
impl std::marker::Unpin for BufferCollectionEventStream {}
3866
impl futures::stream::FusedStream for BufferCollectionEventStream {
    // Terminates exactly when the underlying event receiver terminates;
    // after that, the stream yields no further items.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
3872
3873impl futures::Stream for BufferCollectionEventStream {
3874 type Item = Result<BufferCollectionEvent, fidl::Error>;
3875
3876 fn poll_next(
3877 mut self: std::pin::Pin<&mut Self>,
3878 cx: &mut std::task::Context<'_>,
3879 ) -> std::task::Poll<Option<Self::Item>> {
3880 match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
3881 &mut self.event_receiver,
3882 cx
3883 )?) {
3884 Some(buf) => std::task::Poll::Ready(Some(BufferCollectionEvent::decode(buf))),
3885 None => std::task::Poll::Ready(None),
3886 }
3887 }
3888}
3889
/// Events received on a `BufferCollection` channel.
///
/// The only variant captures flexible events with ordinals this client does
/// not recognize (e.g. events added by a newer server).
#[derive(Debug)]
pub enum BufferCollectionEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
3898
impl BufferCollectionEvent {
    /// Decodes a message buffer as a [`BufferCollectionEvent`].
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<BufferCollectionEvent, fidl::Error> {
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        // Events are unsolicited messages, so their transaction id must be 0.
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            // Any FLEXIBLE-flagged event with an unrecognized ordinal is
            // surfaced as `_UnknownEvent` rather than failing the channel.
            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                Ok(BufferCollectionEvent::_UnknownEvent { ordinal: tx_header.ordinal })
            }
            // A non-flexible event with an unknown ordinal is a protocol error.
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name:
                    <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            }),
        }
    }
}
3919
/// A Stream of incoming requests for fuchsia.sysmem2/BufferCollection.
pub struct BufferCollectionRequestStream {
    // Server-side channel state, shared (via `Arc`) with control handles.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the stream has observed shutdown or peer-closed.
    is_terminated: bool,
}
3925
// The request stream holds no self-referential data, so it is safe to unpin.
impl std::marker::Unpin for BufferCollectionRequestStream {}
3927
impl futures::stream::FusedStream for BufferCollectionRequestStream {
    // Reports the flag maintained by this stream's `poll_next`, which sets it
    // on shutdown or peer-closed.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
3933
impl fidl::endpoints::RequestStream for BufferCollectionRequestStream {
    type Protocol = BufferCollectionMarker;
    type ControlHandle = BufferCollectionControlHandle;

    /// Creates a request stream serving the given async channel.
    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    /// Returns a control handle that shares this stream's channel state.
    fn control_handle(&self) -> Self::ControlHandle {
        BufferCollectionControlHandle { inner: self.inner.clone() }
    }

    /// Decomposes the stream into its shared state and termination flag.
    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    /// Rebuilds a stream from parts previously returned by `into_inner`.
    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
3960
3961impl futures::Stream for BufferCollectionRequestStream {
3962 type Item = Result<BufferCollectionRequest, fidl::Error>;
3963
3964 fn poll_next(
3965 mut self: std::pin::Pin<&mut Self>,
3966 cx: &mut std::task::Context<'_>,
3967 ) -> std::task::Poll<Option<Self::Item>> {
3968 let this = &mut *self;
3969 if this.inner.check_shutdown(cx) {
3970 this.is_terminated = true;
3971 return std::task::Poll::Ready(None);
3972 }
3973 if this.is_terminated {
3974 panic!("polled BufferCollectionRequestStream after completion");
3975 }
3976 fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
3977 |bytes, handles| {
3978 match this.inner.channel().read_etc(cx, bytes, handles) {
3979 std::task::Poll::Ready(Ok(())) => {}
3980 std::task::Poll::Pending => return std::task::Poll::Pending,
3981 std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
3982 this.is_terminated = true;
3983 return std::task::Poll::Ready(None);
3984 }
3985 std::task::Poll::Ready(Err(e)) => {
3986 return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
3987 e.into(),
3988 ))));
3989 }
3990 }
3991
3992 // A message has been received from the channel
3993 let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
3994
3995 std::task::Poll::Ready(Some(match header.ordinal {
3996 0x11ac2555cf575b54 => {
3997 header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
3998 let mut req = fidl::new_empty!(
3999 fidl::encoding::EmptyPayload,
4000 fidl::encoding::DefaultFuchsiaResourceDialect
4001 );
4002 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4003 let control_handle =
4004 BufferCollectionControlHandle { inner: this.inner.clone() };
4005 Ok(BufferCollectionRequest::Sync {
4006 responder: BufferCollectionSyncResponder {
4007 control_handle: std::mem::ManuallyDrop::new(control_handle),
4008 tx_id: header.tx_id,
4009 },
4010 })
4011 }
4012 0x6a5cae7d6d6e04c6 => {
4013 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4014 let mut req = fidl::new_empty!(
4015 fidl::encoding::EmptyPayload,
4016 fidl::encoding::DefaultFuchsiaResourceDialect
4017 );
4018 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4019 let control_handle =
4020 BufferCollectionControlHandle { inner: this.inner.clone() };
4021 Ok(BufferCollectionRequest::Release { control_handle })
4022 }
4023 0xb41f1624f48c1e9 => {
4024 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4025 let mut req = fidl::new_empty!(
4026 NodeSetNameRequest,
4027 fidl::encoding::DefaultFuchsiaResourceDialect
4028 );
4029 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
4030 let control_handle =
4031 BufferCollectionControlHandle { inner: this.inner.clone() };
4032 Ok(BufferCollectionRequest::SetName { payload: req, control_handle })
4033 }
4034 0x5cde8914608d99b1 => {
4035 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4036 let mut req = fidl::new_empty!(
4037 NodeSetDebugClientInfoRequest,
4038 fidl::encoding::DefaultFuchsiaResourceDialect
4039 );
4040 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
4041 let control_handle =
4042 BufferCollectionControlHandle { inner: this.inner.clone() };
4043 Ok(BufferCollectionRequest::SetDebugClientInfo {
4044 payload: req,
4045 control_handle,
4046 })
4047 }
4048 0x716b0af13d5c0806 => {
4049 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4050 let mut req = fidl::new_empty!(
4051 NodeSetDebugTimeoutLogDeadlineRequest,
4052 fidl::encoding::DefaultFuchsiaResourceDialect
4053 );
4054 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
4055 let control_handle =
4056 BufferCollectionControlHandle { inner: this.inner.clone() };
4057 Ok(BufferCollectionRequest::SetDebugTimeoutLogDeadline {
4058 payload: req,
4059 control_handle,
4060 })
4061 }
4062 0x5209c77415b4dfad => {
4063 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4064 let mut req = fidl::new_empty!(
4065 fidl::encoding::EmptyPayload,
4066 fidl::encoding::DefaultFuchsiaResourceDialect
4067 );
4068 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4069 let control_handle =
4070 BufferCollectionControlHandle { inner: this.inner.clone() };
4071 Ok(BufferCollectionRequest::SetVerboseLogging { control_handle })
4072 }
4073 0x5b3d0e51614df053 => {
4074 header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4075 let mut req = fidl::new_empty!(
4076 fidl::encoding::EmptyPayload,
4077 fidl::encoding::DefaultFuchsiaResourceDialect
4078 );
4079 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4080 let control_handle =
4081 BufferCollectionControlHandle { inner: this.inner.clone() };
4082 Ok(BufferCollectionRequest::GetNodeRef {
4083 responder: BufferCollectionGetNodeRefResponder {
4084 control_handle: std::mem::ManuallyDrop::new(control_handle),
4085 tx_id: header.tx_id,
4086 },
4087 })
4088 }
4089 0x3a58e00157e0825 => {
4090 header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4091 let mut req = fidl::new_empty!(
4092 NodeIsAlternateForRequest,
4093 fidl::encoding::DefaultFuchsiaResourceDialect
4094 );
4095 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
4096 let control_handle =
4097 BufferCollectionControlHandle { inner: this.inner.clone() };
4098 Ok(BufferCollectionRequest::IsAlternateFor {
4099 payload: req,
4100 responder: BufferCollectionIsAlternateForResponder {
4101 control_handle: std::mem::ManuallyDrop::new(control_handle),
4102 tx_id: header.tx_id,
4103 },
4104 })
4105 }
4106 0x77d19a494b78ba8c => {
4107 header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4108 let mut req = fidl::new_empty!(
4109 fidl::encoding::EmptyPayload,
4110 fidl::encoding::DefaultFuchsiaResourceDialect
4111 );
4112 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4113 let control_handle =
4114 BufferCollectionControlHandle { inner: this.inner.clone() };
4115 Ok(BufferCollectionRequest::GetBufferCollectionId {
4116 responder: BufferCollectionGetBufferCollectionIdResponder {
4117 control_handle: std::mem::ManuallyDrop::new(control_handle),
4118 tx_id: header.tx_id,
4119 },
4120 })
4121 }
4122 0x22dd3ea514eeffe1 => {
4123 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4124 let mut req = fidl::new_empty!(
4125 fidl::encoding::EmptyPayload,
4126 fidl::encoding::DefaultFuchsiaResourceDialect
4127 );
4128 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4129 let control_handle =
4130 BufferCollectionControlHandle { inner: this.inner.clone() };
4131 Ok(BufferCollectionRequest::SetWeak { control_handle })
4132 }
4133 0x38a44fc4d7724be9 => {
4134 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4135 let mut req = fidl::new_empty!(
4136 NodeSetWeakOkRequest,
4137 fidl::encoding::DefaultFuchsiaResourceDialect
4138 );
4139 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
4140 let control_handle =
4141 BufferCollectionControlHandle { inner: this.inner.clone() };
4142 Ok(BufferCollectionRequest::SetWeakOk { payload: req, control_handle })
4143 }
4144 0x3f22f2a293d3cdac => {
4145 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4146 let mut req = fidl::new_empty!(
4147 NodeAttachNodeTrackingRequest,
4148 fidl::encoding::DefaultFuchsiaResourceDialect
4149 );
4150 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
4151 let control_handle =
4152 BufferCollectionControlHandle { inner: this.inner.clone() };
4153 Ok(BufferCollectionRequest::AttachNodeTracking {
4154 payload: req,
4155 control_handle,
4156 })
4157 }
4158 0x1fde0f19d650197b => {
4159 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4160 let mut req = fidl::new_empty!(
4161 BufferCollectionSetConstraintsRequest,
4162 fidl::encoding::DefaultFuchsiaResourceDialect
4163 );
4164 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionSetConstraintsRequest>(&header, _body_bytes, handles, &mut req)?;
4165 let control_handle =
4166 BufferCollectionControlHandle { inner: this.inner.clone() };
4167 Ok(BufferCollectionRequest::SetConstraints { payload: req, control_handle })
4168 }
4169 0x62300344b61404e => {
4170 header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4171 let mut req = fidl::new_empty!(
4172 fidl::encoding::EmptyPayload,
4173 fidl::encoding::DefaultFuchsiaResourceDialect
4174 );
4175 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4176 let control_handle =
4177 BufferCollectionControlHandle { inner: this.inner.clone() };
4178 Ok(BufferCollectionRequest::WaitForAllBuffersAllocated {
4179 responder: BufferCollectionWaitForAllBuffersAllocatedResponder {
4180 control_handle: std::mem::ManuallyDrop::new(control_handle),
4181 tx_id: header.tx_id,
4182 },
4183 })
4184 }
4185 0x35a5fe77ce939c10 => {
4186 header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4187 let mut req = fidl::new_empty!(
4188 fidl::encoding::EmptyPayload,
4189 fidl::encoding::DefaultFuchsiaResourceDialect
4190 );
4191 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4192 let control_handle =
4193 BufferCollectionControlHandle { inner: this.inner.clone() };
4194 Ok(BufferCollectionRequest::CheckAllBuffersAllocated {
4195 responder: BufferCollectionCheckAllBuffersAllocatedResponder {
4196 control_handle: std::mem::ManuallyDrop::new(control_handle),
4197 tx_id: header.tx_id,
4198 },
4199 })
4200 }
4201 0x46ac7d0008492982 => {
4202 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4203 let mut req = fidl::new_empty!(
4204 BufferCollectionAttachTokenRequest,
4205 fidl::encoding::DefaultFuchsiaResourceDialect
4206 );
4207 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionAttachTokenRequest>(&header, _body_bytes, handles, &mut req)?;
4208 let control_handle =
4209 BufferCollectionControlHandle { inner: this.inner.clone() };
4210 Ok(BufferCollectionRequest::AttachToken { payload: req, control_handle })
4211 }
4212 0x3ecb510113116dcf => {
4213 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4214 let mut req = fidl::new_empty!(
4215 BufferCollectionAttachLifetimeTrackingRequest,
4216 fidl::encoding::DefaultFuchsiaResourceDialect
4217 );
4218 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionAttachLifetimeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
4219 let control_handle =
4220 BufferCollectionControlHandle { inner: this.inner.clone() };
4221 Ok(BufferCollectionRequest::AttachLifetimeTracking {
4222 payload: req,
4223 control_handle,
4224 })
4225 }
4226 _ if header.tx_id == 0
4227 && header
4228 .dynamic_flags()
4229 .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
4230 {
4231 Ok(BufferCollectionRequest::_UnknownMethod {
4232 ordinal: header.ordinal,
4233 control_handle: BufferCollectionControlHandle {
4234 inner: this.inner.clone(),
4235 },
4236 method_type: fidl::MethodType::OneWay,
4237 })
4238 }
4239 _ if header
4240 .dynamic_flags()
4241 .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
4242 {
4243 this.inner.send_framework_err(
4244 fidl::encoding::FrameworkErr::UnknownMethod,
4245 header.tx_id,
4246 header.ordinal,
4247 header.dynamic_flags(),
4248 (bytes, handles),
4249 )?;
4250 Ok(BufferCollectionRequest::_UnknownMethod {
4251 ordinal: header.ordinal,
4252 control_handle: BufferCollectionControlHandle {
4253 inner: this.inner.clone(),
4254 },
4255 method_type: fidl::MethodType::TwoWay,
4256 })
4257 }
4258 _ => Err(fidl::Error::UnknownOrdinal {
4259 ordinal: header.ordinal,
4260 protocol_name:
4261 <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
4262 }),
4263 }))
4264 },
4265 )
4266 }
4267}
4268
4269/// [`fuchsia.sysmem2/BufferCollection`] is a connection directly from a
4270/// participant to sysmem re. a buffer collection; often the buffer collection
4271/// is shared with other participants which have their own `BufferCollection`
4272/// client end(s) associated with the same buffer collection. In other words,
4273/// an instance of the `BufferCollection` interface is a view of a buffer
4274/// collection, not the buffer collection itself.
4275///
4276/// The `BufferCollection` connection exists to facilitate async indication of
4277/// when the buffer collection has been populated with buffers.
4278///
4279/// Also, the channel's closure by the sysmem server is an indication to the
4280/// client that the client should close all VMO handles that were obtained from
4281/// the `BufferCollection` ASAP.
4282///
4283/// Some buffer collections can use enough memory that it can be worth avoiding
4284/// allocation overlap (in time) using
4285/// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] so that the
4286/// initiator can tell when enough buffers of the buffer collection have been
4287/// fully deallocated prior to the initiator allocating a new buffer collection.
4288///
4289/// Epitaphs are not used in this protocol.
4290#[derive(Debug)]
4291pub enum BufferCollectionRequest {
4292 /// Ensure that previous messages have been received server side. This is
4293 /// particularly useful after previous messages that created new tokens,
4294 /// because a token must be known to the sysmem server before sending the
4295 /// token to another participant.
4296 ///
4297 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
4298 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
4299 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
4300 /// to mitigate the possibility of a hostile/fake
4301 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
4302 /// Another way is to pass the token to
4303 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
4304 /// the token as part of exchanging it for a
4305 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
4306 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
4307 /// of stalling.
4308 ///
4309 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
4310 /// and then starting and completing a `Sync`, it's then safe to send the
4311 /// `BufferCollectionToken` client ends to other participants knowing the
4312 /// server will recognize the tokens when they're sent by the other
4313 /// participants to sysmem in a
4314 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
4315 /// efficient way to create tokens while avoiding unnecessary round trips.
4316 ///
4317 /// Other options include waiting for each
4318 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
4319 /// individually (using separate call to `Sync` after each), or calling
4320 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
4321 /// converted to a `BufferCollection` via
4322 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
4323 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
4324 /// the sync step and can create multiple tokens at once.
4325 Sync { responder: BufferCollectionSyncResponder },
4326 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
4327 ///
4328 /// Normally a participant will convert a `BufferCollectionToken` into a
4329 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
4330 /// `Release` via the token (and then close the channel immediately or
4331 /// shortly later in response to server closing the server end), which
4332 /// avoids causing buffer collection failure. Without a prior `Release`,
4333 /// closing the `BufferCollectionToken` client end will cause buffer
4334 /// collection failure.
4335 ///
4336 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
4337 ///
4338 /// By default the server handles unexpected closure of a
4339 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
4340 /// first) by failing the buffer collection. Partly this is to expedite
4341 /// closing VMO handles to reclaim memory when any participant fails. If a
4342 /// participant would like to cleanly close a `BufferCollection` without
4343 /// causing buffer collection failure, the participant can send `Release`
4344 /// before closing the `BufferCollection` client end. The `Release` can
4345 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
4346 /// buffer collection won't require constraints from this node in order to
4347 /// allocate. If after `SetConstraints`, the constraints are retained and
4348 /// aggregated, despite the lack of `BufferCollection` connection at the
4349 /// time of constraints aggregation.
4350 ///
4351 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
4352 ///
4353 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
4354 /// end (without `Release` first) will trigger failure of the buffer
4355 /// collection. To close a `BufferCollectionTokenGroup` channel without
4356 /// failing the buffer collection, ensure that AllChildrenPresent() has been
4357 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
4358 /// client end.
4359 ///
4360 /// If `Release` occurs before
4361 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
4362 /// buffer collection will fail (triggered by reception of `Release` without
4363 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
4364 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
4365 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
4366 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
4367 /// close requires `AllChildrenPresent` (if not already sent), then
4368 /// `Release`, then close client end.
4369 ///
4370 /// If `Release` occurs after `AllChildrenPresent`, the children and all
4371 /// their constraints remain intact (just as they would if the
4372 /// `BufferCollectionTokenGroup` channel had remained open), and the client
4373 /// end close doesn't trigger buffer collection failure.
4374 ///
4375 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
4376 ///
4377 /// For brevity, the per-channel-protocol paragraphs above ignore the
4378 /// separate failure domain created by
4379 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
4380 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
4381 /// unexpectedly closes (without `Release` first) and that client end is
4382 /// under a failure domain, instead of failing the whole buffer collection,
4383 /// the failure domain is failed, but the buffer collection itself is
4384 /// isolated from failure of the failure domain. Such failure domains can be
4385 /// nested, in which case only the inner-most failure domain in which the
4386 /// `Node` resides fails.
4387 Release { control_handle: BufferCollectionControlHandle },
4388 /// Set a name for VMOs in this buffer collection.
4389 ///
4390 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
4391 /// will be truncated to fit. The name of the vmo will be suffixed with the
4392 /// buffer index within the collection (if the suffix fits within
4393 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
4394 /// listed in the inspect data.
4395 ///
4396 /// The name only affects VMOs allocated after the name is set; this call
4397 /// does not rename existing VMOs. If multiple clients set different names
4398 /// then the larger priority value will win. Setting a new name with the
4399 /// same priority as a prior name doesn't change the name.
4400 ///
4401 /// All table fields are currently required.
4402 ///
4403 /// + request `priority` The name is only set if this is the first `SetName`
4404 /// or if `priority` is greater than any previous `priority` value in
4405 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
4406 /// + request `name` The name for VMOs created under this buffer collection.
4407 SetName { payload: NodeSetNameRequest, control_handle: BufferCollectionControlHandle },
4408 /// Set information about the current client that can be used by sysmem to
4409 /// help diagnose leaking memory and allocation stalls waiting for a
4410 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
4411 ///
4412 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
4413 /// `Node`(s) derived from this `Node`, unless overridden by
4414 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
4415 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
4416 ///
4417 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
4418 /// `Allocator` is the most efficient way to ensure that all
4419 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
4420 /// set, and is also more efficient than separately sending the same debug
4421 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
4422 /// created [`fuchsia.sysmem2/Node`].
4423 ///
4424 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
4425 /// indicate which client is closing their channel first, leading to subtree
4426 /// failure (which can be normal if the purpose of the subtree is over, but
4427 /// if happening earlier than expected, the client-channel-specific name can
4428 /// help diagnose where the failure is first coming from, from sysmem's
4429 /// point of view).
4430 ///
4431 /// All table fields are currently required.
4432 ///
4433 /// + request `name` This can be an arbitrary string, but the current
4434 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
4435 /// + request `id` This can be an arbitrary id, but the current process ID
4436 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
4437 SetDebugClientInfo {
4438 payload: NodeSetDebugClientInfoRequest,
4439 control_handle: BufferCollectionControlHandle,
4440 },
4441 /// Sysmem logs a warning if sysmem hasn't seen
4442 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
4443 /// within 5 seconds after creation of a new collection.
4444 ///
4445 /// Clients can call this method to change when the log is printed. If
4446 /// multiple clients set the deadline, it's unspecified which deadline will
4447 /// take effect.
4448 ///
4449 /// In most cases the default works well.
4450 ///
4451 /// All table fields are currently required.
4452 ///
4453 /// + request `deadline` The time at which sysmem will start trying to log
4454 /// the warning, unless all constraints are with sysmem by then.
4455 SetDebugTimeoutLogDeadline {
4456 payload: NodeSetDebugTimeoutLogDeadlineRequest,
4457 control_handle: BufferCollectionControlHandle,
4458 },
4459 /// This enables verbose logging for the buffer collection.
4460 ///
4461 /// Verbose logging includes constraints set via
4462 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
4463 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
4464 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
4465 /// the tree of `Node`(s).
4466 ///
4467 /// Normally sysmem prints only a single line complaint when aggregation
4468 /// fails, with just the specific detailed reason that aggregation failed,
4469 /// with little surrounding context. While this is often enough to diagnose
4470 /// a problem if only a small change was made and everything was working
4471 /// before the small change, it's often not particularly helpful for getting
4472 /// a new buffer collection to work for the first time. Especially with
4473 /// more complex trees of nodes, involving things like
4474 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
4475 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
4476 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
4477 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
4478 /// looks like and why it's failing a logical allocation, or why a tree or
4479 /// subtree is failing sooner than expected.
4480 ///
4481 /// The intent of the extra logging is to be acceptable from a performance
4482 /// point of view, under the assumption that verbose logging is only enabled
4483 /// on a low number of buffer collections. If we're not tracking down a bug,
4484 /// we shouldn't send this message.
4485 SetVerboseLogging { control_handle: BufferCollectionControlHandle },
4486 /// This gets a handle that can be used as a parameter to
4487 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
4488 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
4489 /// client obtained this handle from this `Node`.
4490 ///
4491 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
4492 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
4493 /// despite the two calls typically being on different channels.
4494 ///
4495 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
4496 ///
4497 /// All table fields are currently required.
4498 ///
4499 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
4500 /// different `Node` channel, to prove that the client obtained the handle
4501 /// from this `Node`.
4502 GetNodeRef { responder: BufferCollectionGetNodeRefResponder },
4503 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
4504 /// rooted at a different child token of a common parent
4505 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
4506 /// passed-in `node_ref`.
4507 ///
4508 /// This call is for assisting with admission control de-duplication, and
4509 /// with debugging.
4510 ///
4511 /// The `node_ref` must be obtained using
4512 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
4513 ///
4514 /// The `node_ref` can be a duplicated handle; it's not necessary to call
4515 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
4516 ///
4517 /// If a calling token may not actually be a valid token at all due to a
4518 /// potentially hostile/untrusted provider of the token, call
4519 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
4520 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
4521 /// never responds due to a calling token not being a real token (not really
4522 /// talking to sysmem). Another option is to call
4523 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
4524 /// which also validates the token along with converting it to a
4525 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
4526 ///
4527 /// All table fields are currently required.
4528 ///
4529 /// - response `is_alternate`
4530 /// - true: The first parent node in common between the calling node and
4531 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
4532 /// that the calling `Node` and the `node_ref` `Node` will not have both
4533 /// their constraints apply - rather sysmem will choose one or the other
4534 /// of the constraints - never both. This is because only one child of
4535 /// a `BufferCollectionTokenGroup` is selected during logical
4536 /// allocation, with only that one child's subtree contributing to
4537 /// constraints aggregation.
4538 /// - false: The first parent node in common between the calling `Node`
4539 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
4540 /// Currently, this means the first parent node in common is a
4541 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
4542 /// `Release`ed). This means that the calling `Node` and the `node_ref`
4543 /// `Node` may have both their constraints apply during constraints
4544 /// aggregation of the logical allocation, if both `Node`(s) are
4545 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
4546 /// this case, there is no `BufferCollectionTokenGroup` that will
4547 /// directly prevent the two `Node`(s) from both being selected and
4548 /// their constraints both aggregated, but even when false, one or both
4549 /// `Node`(s) may still be eliminated from consideration if one or both
4550 /// `Node`(s) has a direct or indirect parent
4551 /// `BufferCollectionTokenGroup` which selects a child subtree other
4552 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
4553 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
4554 /// associated with the same buffer collection as the calling `Node`.
4555 /// Another reason for this error is if the `node_ref` is an
4556 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
4557 /// a real `node_ref` obtained from `GetNodeRef`.
4558 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
4559 /// `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
4560 /// the needed rights expected on a real `node_ref`.
4561 /// * No other failing status codes are returned by this call. However,
4562 /// sysmem may add additional codes in future, so the client should have
4563 /// sensible default handling for any failing status code.
4564 IsAlternateFor {
4565 payload: NodeIsAlternateForRequest,
4566 responder: BufferCollectionIsAlternateForResponder,
4567 },
4568 /// Get the buffer collection ID. This ID is also available from
4569 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
4570 /// within the collection).
4571 ///
4572 /// This call is mainly useful in situations where we can't convey a
4573 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
4574 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
4575 /// handle, which can be joined back up with a `BufferCollection` client end
4576 /// that was created via a different path. Prefer to convey a
4577 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
4578 ///
4579 /// Trusting a `buffer_collection_id` value from a source other than sysmem
4580 /// is analogous to trusting a koid value from a source other than zircon.
4581 /// Both should be avoided unless really necessary, and both require
4582 /// caution. In some situations it may be reasonable to refer to a
4583 /// pre-established `BufferCollection` by `buffer_collection_id` via a
4584 /// protocol for efficiency reasons, but an incoming value purporting to be
4585 /// a `buffer_collection_id` is not sufficient alone to justify granting the
4586 /// sender of the `buffer_collection_id` any capability. The sender must
4587 /// first prove to a receiver that the sender has/had a VMO or has/had a
4588 /// `BufferCollectionToken` to the same collection by sending a handle that
4589 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
4590 /// `buffer_collection_id` value. The receiver should take care to avoid
4591 /// assuming that a sender had a `BufferCollectionToken` in cases where the
4592 /// sender has only proven that the sender had a VMO.
4593 ///
4594 /// - response `buffer_collection_id` This ID is unique per buffer
4595 /// collection per boot. Each buffer is uniquely identified by the
4596 /// `buffer_collection_id` and `buffer_index` together.
4597 GetBufferCollectionId { responder: BufferCollectionGetBufferCollectionIdResponder },
4598 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
4599 /// created after this message to weak, which means that a client's `Node`
4600 /// client end (or a child created after this message) is not alone
4601 /// sufficient to keep allocated VMOs alive.
4602 ///
4603 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
4604 /// `close_weak_asap`.
4605 ///
4606 /// This message is only permitted before the `Node` becomes ready for
4607 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
4608 /// * `BufferCollectionToken`: any time
4609 /// * `BufferCollection`: before `SetConstraints`
4610 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
4611 ///
4612 /// Currently, no conversion from strong `Node` to weak `Node` after ready
4613 /// for allocation is provided, but a client can simulate that by creating
4614 /// an additional `Node` before allocation and setting that additional
4615 /// `Node` to weak, and then potentially at some point later sending
4616 /// `Release` and closing the client end of the client's strong `Node`, but
4617 /// keeping the client's weak `Node`.
4618 ///
4619 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
4620 /// collection failure (all `Node` client end(s) will see
4621 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
4622 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
4623 /// this situation until all `Node`(s) are ready for allocation. For initial
4624 /// allocation to succeed, at least one strong `Node` is required to exist
4625 /// at allocation time, but after that client receives VMO handles, that
4626 /// client can `BufferCollection.Release` and close the client end without
4627 /// causing this type of failure.
4628 ///
4629 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
4630 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
4631 /// separately as appropriate.
4632 SetWeak { control_handle: BufferCollectionControlHandle },
4633 /// This indicates to sysmem that the client is prepared to pay attention to
4634 /// `close_weak_asap`.
4635 ///
4636 /// If sent, this message must be before
4637 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
4638 ///
4639 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
4640 /// send this message before `WaitForAllBuffersAllocated`, or a parent
4641 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
4642 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
4643 /// trigger buffer collection failure.
4644 ///
4645 /// This message is necessary because weak sysmem VMOs have not always been
4646 /// a thing, so older clients are not aware of the need to pay attention to
4647 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
4648 /// sysmem weak VMO handles asap. By having this message and requiring
4649 /// participants to indicate their acceptance of this aspect of the overall
4650 /// protocol, we avoid situations where an older client is delivered a weak
4651 /// VMO without any way for sysmem to get that VMO to close quickly later
4652 /// (and on a per-buffer basis).
4653 ///
4654 /// A participant that doesn't handle `close_weak_asap` and also doesn't
4655 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
4656 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
4657 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
4658 /// same participant has a child/delegate which does retrieve VMOs, that
4659 /// child/delegate will need to send `SetWeakOk` before
4660 /// `WaitForAllBuffersAllocated`.
4661 ///
4662 /// + request `for_child_nodes_also` If present and true, this means direct
4663 /// child nodes of this node created after this message plus all
4664 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
4665 /// those nodes. Any child node of this node that was created before this
4666 /// message is not included. This setting is "sticky" in the sense that a
4667 /// subsequent `SetWeakOk` without this bool set to true does not reset
4668 /// the server-side bool. If this creates a problem for a participant, a
4669 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
4670 /// tokens instead, as appropriate. A participant should only set
4671 /// `for_child_nodes_also` true if the participant can really promise to
4672 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
4673 /// weak VMO handles held by participants holding the corresponding child
4674 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
4675 /// which are using sysmem(1) can be weak, despite the clients of those
4676 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
4677 /// direct way to find out about `close_weak_asap`. This only applies to
4678 /// descendents of this `Node` which are using sysmem(1), not to this
4679 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
4680 /// token, which will fail allocation unless an ancestor of this `Node`
4681 /// specified `for_child_nodes_also` true.
4682 SetWeakOk { payload: NodeSetWeakOkRequest, control_handle: BufferCollectionControlHandle },
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
4685 /// reservation by a different `Node` via
4686 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
4687 ///
4688 /// The `Node` buffer counts may not be released until the entire tree of
4689 /// `Node`(s) is closed or failed, because
4690 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
4691 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
4692 /// `Node` buffer counts remain reserved until the orphaned node is later
4693 /// cleaned up.
4694 ///
4695 /// If the `Node` exceeds a fairly large number of attached eventpair server
4696 /// ends, a log message will indicate this and the `Node` (and the
4697 /// appropriate) sub-tree will fail.
4698 ///
4699 /// The `server_end` will remain open when
4700 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
4701 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
4702 /// [`fuchsia.sysmem2/BufferCollection`].
4703 ///
4704 /// This message can also be used with a
4705 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
4706 AttachNodeTracking {
4707 payload: NodeAttachNodeTrackingRequest,
4708 control_handle: BufferCollectionControlHandle,
4709 },
4710 /// Provide [`fuchsia.sysmem2/BufferCollectionConstraints`] to the buffer
4711 /// collection.
4712 ///
4713 /// A participant may only call
4714 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] up to once per
4715 /// [`fuchsia.sysmem2/BufferCollection`].
4716 ///
4717 /// For buffer allocation to be attempted, all holders of a
4718 /// `BufferCollection` client end need to call `SetConstraints` before
4719 /// sysmem will attempt to allocate buffers.
4720 ///
4721 /// + request `constraints` These are the constraints on the buffer
4722 /// collection imposed by the sending client/participant. The
4723 /// `constraints` field is not required to be set. If not set, the client
4724 /// is not setting any actual constraints, but is indicating that the
4725 /// client has no constraints to set. A client that doesn't set the
4726 /// `constraints` field won't receive any VMO handles, but can still find
4727 /// out how many buffers were allocated and can still refer to buffers by
4728 /// their `buffer_index`.
4729 SetConstraints {
4730 payload: BufferCollectionSetConstraintsRequest,
4731 control_handle: BufferCollectionControlHandle,
4732 },
4733 /// Wait until all buffers are allocated.
4734 ///
4735 /// This FIDL call completes when buffers have been allocated, or completes
4736 /// with some failure detail if allocation has been attempted but failed.
4737 ///
4738 /// The following must occur before buffers will be allocated:
4739 /// * All [`fuchsia.sysmem2/BufferCollectionToken`](s) of the buffer
4740 /// collection must be turned in via `BindSharedCollection` to get a
4741 /// [`fuchsia.sysmem2/BufferCollection`] (for brevity, this is assuming
4742 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] isn't being used),
4743 /// or have had [`fuchsia.sysmem2/BufferCollectionToken.Release`] sent
4744 /// to them.
4745 /// * All [`fuchsia.sysmem2/BufferCollection`](s) of the buffer collection
4746 /// must have had [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
4747 /// sent to them, or had [`fuchsia.sysmem2/BufferCollection.Release`]
4748 /// sent to them.
4749 ///
4750 /// - result `buffer_collection_info` The VMO handles and other related
4751 /// info.
4752 /// * error `[fuchsia.sysmem2/Error.NO_MEMORY]` The request is valid but
4753 /// cannot be fulfilled due to resource exhaustion.
4754 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION`] The request is
4755 /// malformed.
4756 /// * error `[fuchsia.sysmem2/Error.CONSTRAINTS_INTERSECTION_EMPTY`] The
4757 /// request is valid but cannot be satisfied, perhaps due to hardware
4758 /// limitations. This can happen if participants have incompatible
4759 /// constraints (empty intersection, roughly speaking). See the log for
4760 /// more info. In cases where a participant could potentially be treated
4761 /// as optional, see [`BufferCollectionTokenGroup`]. When using
4762 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], this will be the
4763 /// error code if there aren't enough buffers in the pre-existing
4764 /// collection to satisfy the constraints set on the attached token and
4765 /// any sub-tree of tokens derived from the attached token.
4766 WaitForAllBuffersAllocated { responder: BufferCollectionWaitForAllBuffersAllocatedResponder },
4767 /// Checks whether all the buffers have been allocated, in a polling
4768 /// fashion.
4769 ///
4770 /// * If the buffer collection has been allocated, returns success.
4771 /// * If the buffer collection failed allocation, returns the same
4772 /// [`fuchsia.sysmem2/Error`] as
4773 /// [`fuchsia.sysmem2/BufferCollection/WaitForAllBuffersAllocated`] would
4774 /// return.
4775 /// * error [`fuchsia.sysmem2/Error.PENDING`] The buffer collection hasn't
4776 /// attempted allocation yet. This means that WaitForAllBuffersAllocated
4777 /// would not respond quickly.
4778 CheckAllBuffersAllocated { responder: BufferCollectionCheckAllBuffersAllocatedResponder },
4779 /// Create a new token to add a new participant to an existing logical
4780 /// buffer collection, if the existing collection's buffer counts,
4781 /// constraints, and participants allow.
4782 ///
4783 /// This can be useful in replacing a failed participant, and/or in
4784 /// adding/re-adding a participant after buffers have already been
4785 /// allocated.
4786 ///
4787 /// When [`fuchsia.sysmem2/BufferCollection.AttachToken`] is used, the sub
4788 /// tree rooted at the attached [`fuchsia.sysmem2/BufferCollectionToken`]
4789 /// goes through the normal procedure of setting constraints or closing
4790 /// [`fuchsia.sysmem2/Node`](s), and then appearing to allocate buffers from
4791 /// clients' point of view, despite the possibility that all the buffers
4792 /// were actually allocated previously. This process is called "logical
4793 /// allocation". Most instances of "allocation" in docs for other messages
4794 /// can also be read as "allocation or logical allocation" while remaining
4795 /// valid, but we just say "allocation" in most places for brevity/clarity
4796 /// of explanation, with the details of "logical allocation" left for the
4797 /// docs here on `AttachToken`.
4798 ///
4799 /// Failure of an attached `Node` does not propagate to the parent of the
4800 /// attached `Node`. More generally, failure of a child `Node` is blocked
4801 /// from reaching its parent `Node` if the child is attached, or if the
4802 /// child is dispensable and the failure occurred after logical allocation
4803 /// (see [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`]).
4804 ///
4805 /// A participant may in some scenarios choose to initially use a
4806 /// dispensable token for a given instance of a delegate participant, and
4807 /// then later if the first instance of that delegate participant fails, a
    /// new second instance of that delegate participant may be given a token
4809 /// created with `AttachToken`.
4810 ///
4811 /// From the point of view of the [`fuchsia.sysmem2/BufferCollectionToken`]
4812 /// client end, the token acts like any other token. The client can
4813 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] the token as needed,
4814 /// and can send the token to a different process/participant. The
4815 /// `BufferCollectionToken` `Node` should be converted to a
4816 /// `BufferCollection` `Node` as normal by sending
4817 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or can be closed
4818 /// without causing subtree failure by sending
4819 /// [`fuchsia.sysmem2/BufferCollectionToken.Release`]. Assuming the former,
4820 /// the [`fuchsia.sysmem2/BufferCollection.SetConstraints`] message or
4821 /// [`fuchsia.sysmem2/BufferCollection.Release`] message should be sent to
4822 /// the `BufferCollection`.
4823 ///
4824 /// Within the subtree, a success result from
4825 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`] means
4826 /// the subtree participants' constraints were satisfiable using the
4827 /// already-existing buffer collection, the already-established
4828 /// [`fuchsia.sysmem2/BufferCollectionInfo`] including image format
4829 /// constraints, and the already-existing other participants (already added
4830 /// via successful logical allocation) and their specified buffer counts in
4831 /// their constraints. A failure result means the new participants'
4832 /// constraints cannot be satisfied using the existing buffer collection and
4833 /// its already-added participants. Creating a new collection instead may
4834 /// allow all participants' constraints to be satisfied, assuming
4835 /// `SetDispensable` is used in place of `AttachToken`, or a normal token is
4836 /// used.
4837 ///
4838 /// A token created with `AttachToken` performs constraints aggregation with
4839 /// all constraints currently in effect on the buffer collection, plus the
4840 /// attached token under consideration plus child tokens under the attached
4841 /// token which are not themselves an attached token or under such a token.
4842 /// Further subtrees under this subtree are considered for logical
4843 /// allocation only after this subtree has completed logical allocation.
4844 ///
4845 /// Assignment of existing buffers to participants'
4846 /// [`fuchsia.sysmem2/BufferCollectionConstraints.min_buffer_count_for_camping`]
4847 /// etc is first-come first-served, but a child can't logically allocate
4848 /// before all its parents have sent `SetConstraints`.
4849 ///
4850 /// See also [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`], which
4851 /// in contrast to `AttachToken`, has the created token `Node` + child
4852 /// `Node`(s) (in the created subtree but not in any subtree under this
4853 /// subtree) participate in constraints aggregation along with its parent
4854 /// during the parent's allocation or logical allocation.
4855 ///
4856 /// Similar to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], the
4857 /// newly created token needs to be [`fuchsia.sysmem2/Node.Sync`]ed to
4858 /// sysmem before the new token can be passed to `BindSharedCollection`. The
4859 /// `Sync` of the new token can be accomplished with
4860 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after converting the created
4861 /// `BufferCollectionToken` to a `BufferCollection`. Alternately,
4862 /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on the new token also
4863 /// works. Or using [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`]
4864 /// works. As usual, a `BufferCollectionToken.Sync` can be started after any
4865 /// `BufferCollectionToken.Duplicate` messages have been sent via the newly
4866 /// created token, to also sync those additional tokens to sysmem using a
4867 /// single round-trip.
4868 ///
4869 /// All table fields are currently required.
4870 ///
    /// + request `rights_attenuation_mask` This allows attenuating the VMO
4872 /// rights of the subtree. These values for `rights_attenuation_mask`
4873 /// result in no attenuation (note that 0 is not on this list):
4874 /// + ZX_RIGHT_SAME_RIGHTS (preferred)
4875 /// + 0xFFFFFFFF (this is reasonable when an attenuation mask is computed)
4876 /// + request `token_request` The server end of the `BufferCollectionToken`
4877 /// channel. The client retains the client end.
4878 AttachToken {
4879 payload: BufferCollectionAttachTokenRequest,
4880 control_handle: BufferCollectionControlHandle,
4881 },
4882 /// Set up an eventpair to be signalled (`ZX_EVENTPAIR_PEER_CLOSED`) when
4883 /// buffers have been allocated and only the specified number of buffers (or
4884 /// fewer) remain in the buffer collection.
4885 ///
4886 /// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] allows a
4887 /// client to wait until an old buffer collection is fully or mostly
4888 /// deallocated before attempting allocation of a new buffer collection. The
4889 /// eventpair is only signalled when the buffers of this collection have
4890 /// been fully deallocated (not just un-referenced by clients, but all the
4891 /// memory consumed by those buffers has been fully reclaimed/recycled), or
4892 /// when allocation or logical allocation fails for the tree or subtree
4893 /// including this [`fuchsia.sysmem2/BufferCollection`].
4894 ///
4895 /// The eventpair won't be signalled until allocation or logical allocation
4896 /// has completed; until then, the collection's current buffer count is
4897 /// ignored.
4898 ///
4899 /// If logical allocation fails for an attached subtree (using
4900 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]), the server end of the
4901 /// eventpair will close during that failure regardless of the number of
    /// buffers potentially allocated in the overall buffer collection. This is
4903 /// for logical allocation consistency with normal allocation.
4904 ///
4905 /// The lifetime signalled by this event includes asynchronous cleanup of
4906 /// allocated buffers, and this asynchronous cleanup cannot occur until all
4907 /// holders of VMO handles to the buffers have closed those VMO handles.
4908 /// Therefore, clients should take care not to become blocked forever
4909 /// waiting for `ZX_EVENTPAIR_PEER_CLOSED` to be signalled if any of the
4910 /// participants using the logical buffer collection (including the waiter
4911 /// itself) are less trusted, less reliable, or potentially blocked by the
4912 /// wait itself. Waiting asynchronously is recommended. Setting a deadline
4913 /// for the client wait may be prudent, depending on details of how the
4914 /// collection and/or its VMOs are used or shared. Failure to allocate a
4915 /// new/replacement buffer collection is better than getting stuck forever.
4916 ///
4917 /// The sysmem server itself intentionally does not perform any waiting on
4918 /// already-failed collections' VMOs to finish cleaning up before attempting
4919 /// a new allocation, and the sysmem server intentionally doesn't retry
4920 /// allocation if a new allocation fails due to out of memory, even if that
4921 /// failure is potentially due to continued existence of an old collection's
4922 /// VMOs. This `AttachLifetimeTracking` message is how an initiator can
4923 /// mitigate too much overlap of old VMO lifetimes with new VMO lifetimes,
4924 /// as long as the waiting client is careful to not create a deadlock.
4925 ///
4926 /// Continued existence of old collections that are still cleaning up is not
4927 /// the only reason that a new allocation may fail due to insufficient
4928 /// memory, even if the new allocation is allocating physically contiguous
4929 /// buffers. Overall system memory pressure can also be the cause of failure
4930 /// to allocate a new collection. See also
4931 /// [`fuchsia.memorypressure/Provider`].
4932 ///
4933 /// `AttachLifetimeTracking` is meant to be compatible with other protocols
4934 /// with a similar `AttachLifetimeTracking` message; duplicates of the same
4935 /// `eventpair` handle (server end) can be sent via more than one
4936 /// `AttachLifetimeTracking` message to different protocols, and the
4937 /// `ZX_EVENTPAIR_PEER_CLOSED` will be signalled for the client end when all
4938 /// the conditions are met (all holders of duplicates have closed their
    /// server end handle(s)). Also, thanks to how eventpair endpoints work, the
4940 /// client end can (also) be duplicated without preventing the
4941 /// `ZX_EVENTPAIR_PEER_CLOSED` signal.
4942 ///
4943 /// The server intentionally doesn't "trust" any signals set on the
4944 /// `server_end`. This mechanism intentionally uses only
4945 /// `ZX_EVENTPAIR_PEER_CLOSED` set on the client end, which can't be set
4946 /// "early", and is only set when all handles to the server end eventpair
4947 /// are closed. No meaning is associated with any of the other signals, and
4948 /// clients should ignore any other signal bits on either end of the
4949 /// `eventpair`.
4950 ///
4951 /// The `server_end` may lack `ZX_RIGHT_SIGNAL` or `ZX_RIGHT_SIGNAL_PEER`,
4952 /// but must have `ZX_RIGHT_DUPLICATE` (and must have `ZX_RIGHT_TRANSFER` to
4953 /// transfer without causing `BufferCollection` channel failure).
4954 ///
4955 /// All table fields are currently required.
4956 ///
4957 /// + request `server_end` This eventpair handle will be closed by the
4958 /// sysmem server when buffers have been allocated initially and the
4959 /// number of buffers is then less than or equal to `buffers_remaining`.
4960 /// + request `buffers_remaining` Wait for all but `buffers_remaining` (or
4961 /// fewer) buffers to be fully deallocated. A number greater than zero can
4962 /// be useful in situations where a known number of buffers are
4963 /// intentionally not closed so that the data can continue to be used,
4964 /// such as for keeping the last available video frame displayed in the UI
4965 /// even if the video stream was using protected output buffers. It's
4966 /// outside the scope of the `BufferCollection` interface (at least for
4967 /// now) to determine how many buffers may be held without closing, but
4968 /// it'll typically be in the range 0-2.
4969 AttachLifetimeTracking {
4970 payload: BufferCollectionAttachLifetimeTrackingRequest,
4971 control_handle: BufferCollectionControlHandle,
4972 },
4973 /// An interaction was received which does not match any known method.
4974 #[non_exhaustive]
4975 _UnknownMethod {
4976 /// Ordinal of the method that was called.
4977 ordinal: u64,
4978 control_handle: BufferCollectionControlHandle,
4979 method_type: fidl::MethodType,
4980 },
4981}
4982
4983impl BufferCollectionRequest {
4984 #[allow(irrefutable_let_patterns)]
4985 pub fn into_sync(self) -> Option<(BufferCollectionSyncResponder)> {
4986 if let BufferCollectionRequest::Sync { responder } = self {
4987 Some((responder))
4988 } else {
4989 None
4990 }
4991 }
4992
4993 #[allow(irrefutable_let_patterns)]
4994 pub fn into_release(self) -> Option<(BufferCollectionControlHandle)> {
4995 if let BufferCollectionRequest::Release { control_handle } = self {
4996 Some((control_handle))
4997 } else {
4998 None
4999 }
5000 }
5001
5002 #[allow(irrefutable_let_patterns)]
5003 pub fn into_set_name(self) -> Option<(NodeSetNameRequest, BufferCollectionControlHandle)> {
5004 if let BufferCollectionRequest::SetName { payload, control_handle } = self {
5005 Some((payload, control_handle))
5006 } else {
5007 None
5008 }
5009 }
5010
5011 #[allow(irrefutable_let_patterns)]
5012 pub fn into_set_debug_client_info(
5013 self,
5014 ) -> Option<(NodeSetDebugClientInfoRequest, BufferCollectionControlHandle)> {
5015 if let BufferCollectionRequest::SetDebugClientInfo { payload, control_handle } = self {
5016 Some((payload, control_handle))
5017 } else {
5018 None
5019 }
5020 }
5021
5022 #[allow(irrefutable_let_patterns)]
5023 pub fn into_set_debug_timeout_log_deadline(
5024 self,
5025 ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, BufferCollectionControlHandle)> {
5026 if let BufferCollectionRequest::SetDebugTimeoutLogDeadline { payload, control_handle } =
5027 self
5028 {
5029 Some((payload, control_handle))
5030 } else {
5031 None
5032 }
5033 }
5034
5035 #[allow(irrefutable_let_patterns)]
5036 pub fn into_set_verbose_logging(self) -> Option<(BufferCollectionControlHandle)> {
5037 if let BufferCollectionRequest::SetVerboseLogging { control_handle } = self {
5038 Some((control_handle))
5039 } else {
5040 None
5041 }
5042 }
5043
5044 #[allow(irrefutable_let_patterns)]
5045 pub fn into_get_node_ref(self) -> Option<(BufferCollectionGetNodeRefResponder)> {
5046 if let BufferCollectionRequest::GetNodeRef { responder } = self {
5047 Some((responder))
5048 } else {
5049 None
5050 }
5051 }
5052
5053 #[allow(irrefutable_let_patterns)]
5054 pub fn into_is_alternate_for(
5055 self,
5056 ) -> Option<(NodeIsAlternateForRequest, BufferCollectionIsAlternateForResponder)> {
5057 if let BufferCollectionRequest::IsAlternateFor { payload, responder } = self {
5058 Some((payload, responder))
5059 } else {
5060 None
5061 }
5062 }
5063
5064 #[allow(irrefutable_let_patterns)]
5065 pub fn into_get_buffer_collection_id(
5066 self,
5067 ) -> Option<(BufferCollectionGetBufferCollectionIdResponder)> {
5068 if let BufferCollectionRequest::GetBufferCollectionId { responder } = self {
5069 Some((responder))
5070 } else {
5071 None
5072 }
5073 }
5074
5075 #[allow(irrefutable_let_patterns)]
5076 pub fn into_set_weak(self) -> Option<(BufferCollectionControlHandle)> {
5077 if let BufferCollectionRequest::SetWeak { control_handle } = self {
5078 Some((control_handle))
5079 } else {
5080 None
5081 }
5082 }
5083
5084 #[allow(irrefutable_let_patterns)]
5085 pub fn into_set_weak_ok(self) -> Option<(NodeSetWeakOkRequest, BufferCollectionControlHandle)> {
5086 if let BufferCollectionRequest::SetWeakOk { payload, control_handle } = self {
5087 Some((payload, control_handle))
5088 } else {
5089 None
5090 }
5091 }
5092
5093 #[allow(irrefutable_let_patterns)]
5094 pub fn into_attach_node_tracking(
5095 self,
5096 ) -> Option<(NodeAttachNodeTrackingRequest, BufferCollectionControlHandle)> {
5097 if let BufferCollectionRequest::AttachNodeTracking { payload, control_handle } = self {
5098 Some((payload, control_handle))
5099 } else {
5100 None
5101 }
5102 }
5103
5104 #[allow(irrefutable_let_patterns)]
5105 pub fn into_set_constraints(
5106 self,
5107 ) -> Option<(BufferCollectionSetConstraintsRequest, BufferCollectionControlHandle)> {
5108 if let BufferCollectionRequest::SetConstraints { payload, control_handle } = self {
5109 Some((payload, control_handle))
5110 } else {
5111 None
5112 }
5113 }
5114
5115 #[allow(irrefutable_let_patterns)]
5116 pub fn into_wait_for_all_buffers_allocated(
5117 self,
5118 ) -> Option<(BufferCollectionWaitForAllBuffersAllocatedResponder)> {
5119 if let BufferCollectionRequest::WaitForAllBuffersAllocated { responder } = self {
5120 Some((responder))
5121 } else {
5122 None
5123 }
5124 }
5125
5126 #[allow(irrefutable_let_patterns)]
5127 pub fn into_check_all_buffers_allocated(
5128 self,
5129 ) -> Option<(BufferCollectionCheckAllBuffersAllocatedResponder)> {
5130 if let BufferCollectionRequest::CheckAllBuffersAllocated { responder } = self {
5131 Some((responder))
5132 } else {
5133 None
5134 }
5135 }
5136
5137 #[allow(irrefutable_let_patterns)]
5138 pub fn into_attach_token(
5139 self,
5140 ) -> Option<(BufferCollectionAttachTokenRequest, BufferCollectionControlHandle)> {
5141 if let BufferCollectionRequest::AttachToken { payload, control_handle } = self {
5142 Some((payload, control_handle))
5143 } else {
5144 None
5145 }
5146 }
5147
5148 #[allow(irrefutable_let_patterns)]
5149 pub fn into_attach_lifetime_tracking(
5150 self,
5151 ) -> Option<(BufferCollectionAttachLifetimeTrackingRequest, BufferCollectionControlHandle)>
5152 {
5153 if let BufferCollectionRequest::AttachLifetimeTracking { payload, control_handle } = self {
5154 Some((payload, control_handle))
5155 } else {
5156 None
5157 }
5158 }
5159
5160 /// Name of the method defined in FIDL
5161 pub fn method_name(&self) -> &'static str {
5162 match *self {
5163 BufferCollectionRequest::Sync { .. } => "sync",
5164 BufferCollectionRequest::Release { .. } => "release",
5165 BufferCollectionRequest::SetName { .. } => "set_name",
5166 BufferCollectionRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
5167 BufferCollectionRequest::SetDebugTimeoutLogDeadline { .. } => {
5168 "set_debug_timeout_log_deadline"
5169 }
5170 BufferCollectionRequest::SetVerboseLogging { .. } => "set_verbose_logging",
5171 BufferCollectionRequest::GetNodeRef { .. } => "get_node_ref",
5172 BufferCollectionRequest::IsAlternateFor { .. } => "is_alternate_for",
5173 BufferCollectionRequest::GetBufferCollectionId { .. } => "get_buffer_collection_id",
5174 BufferCollectionRequest::SetWeak { .. } => "set_weak",
5175 BufferCollectionRequest::SetWeakOk { .. } => "set_weak_ok",
5176 BufferCollectionRequest::AttachNodeTracking { .. } => "attach_node_tracking",
5177 BufferCollectionRequest::SetConstraints { .. } => "set_constraints",
5178 BufferCollectionRequest::WaitForAllBuffersAllocated { .. } => {
5179 "wait_for_all_buffers_allocated"
5180 }
5181 BufferCollectionRequest::CheckAllBuffersAllocated { .. } => {
5182 "check_all_buffers_allocated"
5183 }
5184 BufferCollectionRequest::AttachToken { .. } => "attach_token",
5185 BufferCollectionRequest::AttachLifetimeTracking { .. } => "attach_lifetime_tracking",
5186 BufferCollectionRequest::_UnknownMethod {
5187 method_type: fidl::MethodType::OneWay,
5188 ..
5189 } => "unknown one-way method",
5190 BufferCollectionRequest::_UnknownMethod {
5191 method_type: fidl::MethodType::TwoWay,
5192 ..
5193 } => "unknown two-way method",
5194 }
5195 }
5196}
5197
/// Control handle for a served `BufferCollection` channel. Cheap to clone:
/// clones share the same underlying serve state via the `Arc`.
#[derive(Debug, Clone)]
pub struct BufferCollectionControlHandle {
    // Shared serve-loop state; all trait methods below delegate to this.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
5202
// Thin delegation of the `ControlHandle` trait to the shared `ServeInner`.
impl fidl::endpoints::ControlHandle for BufferCollectionControlHandle {
    // Shut down the channel without sending an epitaph.
    fn shutdown(&self) {
        self.inner.shutdown()
    }

    // Shut down the channel after sending `status` as the epitaph.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    // Reports whether the underlying channel has been closed.
    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    // Returns a future/signal handle that resolves when the channel closes.
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Signal the peer endpoint of the channel (Fuchsia targets only).
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
5229
// No inherent methods beyond the `ControlHandle` trait impl above; fidlgen
// emits this empty impl block for uniformity across generated protocols.
impl BufferCollectionControlHandle {}
5231
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionSyncResponder {
    // Wrapped in `ManuallyDrop` so `drop` / `drop_without_shutdown` can
    // control exactly when the handle is dropped (see impls below).
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id echoed back when sending the response.
    tx_id: u32,
}
5238
/// Set the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5249
impl fidl::endpoints::Responder for BufferCollectionSyncResponder {
    type ControlHandle = BufferCollectionControlHandle;

    // Borrow the control handle without consuming the responder.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    // Consume the responder without responding and WITHOUT shutting down the
    // channel (in contrast to `Drop` above, which shuts the channel down).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5264
5265impl BufferCollectionSyncResponder {
5266 /// Sends a response to the FIDL transaction.
5267 ///
5268 /// Sets the channel to shutdown if an error occurs.
5269 pub fn send(self) -> Result<(), fidl::Error> {
5270 let _result = self.send_raw();
5271 if _result.is_err() {
5272 self.control_handle.shutdown();
5273 }
5274 self.drop_without_shutdown();
5275 _result
5276 }
5277
5278 /// Similar to "send" but does not shutdown the channel if an error occurs.
5279 pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
5280 let _result = self.send_raw();
5281 self.drop_without_shutdown();
5282 _result
5283 }
5284
5285 fn send_raw(&self) -> Result<(), fidl::Error> {
5286 self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
5287 fidl::encoding::Flexible::new(()),
5288 self.tx_id,
5289 0x11ac2555cf575b54,
5290 fidl::encoding::DynamicFlags::FLEXIBLE,
5291 )
5292 }
5293}
5294
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionGetNodeRefResponder {
    // Wrapped in `ManuallyDrop` so `drop` / `drop_without_shutdown` can
    // control exactly when the handle is dropped (see impls below).
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id echoed back when sending the response.
    tx_id: u32,
}
5301
/// Set the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionGetNodeRefResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5312
impl fidl::endpoints::Responder for BufferCollectionGetNodeRefResponder {
    type ControlHandle = BufferCollectionControlHandle;

    // Borrow the control handle without consuming the responder.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    // Consume the responder without responding and WITHOUT shutting down the
    // channel (in contrast to `Drop` above, which shuts the channel down).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5327
5328impl BufferCollectionGetNodeRefResponder {
5329 /// Sends a response to the FIDL transaction.
5330 ///
5331 /// Sets the channel to shutdown if an error occurs.
5332 pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
5333 let _result = self.send_raw(payload);
5334 if _result.is_err() {
5335 self.control_handle.shutdown();
5336 }
5337 self.drop_without_shutdown();
5338 _result
5339 }
5340
5341 /// Similar to "send" but does not shutdown the channel if an error occurs.
5342 pub fn send_no_shutdown_on_err(
5343 self,
5344 mut payload: NodeGetNodeRefResponse,
5345 ) -> Result<(), fidl::Error> {
5346 let _result = self.send_raw(payload);
5347 self.drop_without_shutdown();
5348 _result
5349 }
5350
5351 fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
5352 self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
5353 fidl::encoding::Flexible::new(&mut payload),
5354 self.tx_id,
5355 0x5b3d0e51614df053,
5356 fidl::encoding::DynamicFlags::FLEXIBLE,
5357 )
5358 }
5359}
5360
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionIsAlternateForResponder {
    // Wrapped in `ManuallyDrop` so `drop` / `drop_without_shutdown` can
    // control exactly when the handle is dropped (see impls below).
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id echoed back when sending the response.
    tx_id: u32,
}
5367
/// Set the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionIsAlternateForResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5378
impl fidl::endpoints::Responder for BufferCollectionIsAlternateForResponder {
    type ControlHandle = BufferCollectionControlHandle;

    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5393
5394impl BufferCollectionIsAlternateForResponder {
5395 /// Sends a response to the FIDL transaction.
5396 ///
5397 /// Sets the channel to shutdown if an error occurs.
5398 pub fn send(
5399 self,
5400 mut result: Result<&NodeIsAlternateForResponse, Error>,
5401 ) -> Result<(), fidl::Error> {
5402 let _result = self.send_raw(result);
5403 if _result.is_err() {
5404 self.control_handle.shutdown();
5405 }
5406 self.drop_without_shutdown();
5407 _result
5408 }
5409
5410 /// Similar to "send" but does not shutdown the channel if an error occurs.
5411 pub fn send_no_shutdown_on_err(
5412 self,
5413 mut result: Result<&NodeIsAlternateForResponse, Error>,
5414 ) -> Result<(), fidl::Error> {
5415 let _result = self.send_raw(result);
5416 self.drop_without_shutdown();
5417 _result
5418 }
5419
5420 fn send_raw(
5421 &self,
5422 mut result: Result<&NodeIsAlternateForResponse, Error>,
5423 ) -> Result<(), fidl::Error> {
5424 self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
5425 NodeIsAlternateForResponse,
5426 Error,
5427 >>(
5428 fidl::encoding::FlexibleResult::new(result),
5429 self.tx_id,
5430 0x3a58e00157e0825,
5431 fidl::encoding::DynamicFlags::FLEXIBLE,
5432 )
5433 }
5434}
5435
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionGetBufferCollectionIdResponder {
    // Wrapped in ManuallyDrop so drop_without_shutdown() can destroy the
    // handle without running this type's Drop impl (which would shut the
    // channel down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request; passed back when sending the reply.
    tx_id: u32,
}
5442
/// Set the channel to be shut down (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionGetBufferCollectionIdResponder {
    fn drop(&mut self) {
        // Shut down the channel first, then manually destroy the handle.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5453
impl fidl::endpoints::Responder for BufferCollectionGetBufferCollectionIdResponder {
    type ControlHandle = BufferCollectionControlHandle;

    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5468
5469impl BufferCollectionGetBufferCollectionIdResponder {
5470 /// Sends a response to the FIDL transaction.
5471 ///
5472 /// Sets the channel to shutdown if an error occurs.
5473 pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
5474 let _result = self.send_raw(payload);
5475 if _result.is_err() {
5476 self.control_handle.shutdown();
5477 }
5478 self.drop_without_shutdown();
5479 _result
5480 }
5481
5482 /// Similar to "send" but does not shutdown the channel if an error occurs.
5483 pub fn send_no_shutdown_on_err(
5484 self,
5485 mut payload: &NodeGetBufferCollectionIdResponse,
5486 ) -> Result<(), fidl::Error> {
5487 let _result = self.send_raw(payload);
5488 self.drop_without_shutdown();
5489 _result
5490 }
5491
5492 fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
5493 self.control_handle
5494 .inner
5495 .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
5496 fidl::encoding::Flexible::new(payload),
5497 self.tx_id,
5498 0x77d19a494b78ba8c,
5499 fidl::encoding::DynamicFlags::FLEXIBLE,
5500 )
5501 }
5502}
5503
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionWaitForAllBuffersAllocatedResponder {
    // Wrapped in ManuallyDrop so drop_without_shutdown() can destroy the
    // handle without running this type's Drop impl (which would shut the
    // channel down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request; passed back when sending the reply.
    tx_id: u32,
}
5510
/// Set the channel to be shut down (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionWaitForAllBuffersAllocatedResponder {
    fn drop(&mut self) {
        // Shut down the channel first, then manually destroy the handle.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5521
impl fidl::endpoints::Responder for BufferCollectionWaitForAllBuffersAllocatedResponder {
    type ControlHandle = BufferCollectionControlHandle;

    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5536
5537impl BufferCollectionWaitForAllBuffersAllocatedResponder {
5538 /// Sends a response to the FIDL transaction.
5539 ///
5540 /// Sets the channel to shutdown if an error occurs.
5541 pub fn send(
5542 self,
5543 mut result: Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>,
5544 ) -> Result<(), fidl::Error> {
5545 let _result = self.send_raw(result);
5546 if _result.is_err() {
5547 self.control_handle.shutdown();
5548 }
5549 self.drop_without_shutdown();
5550 _result
5551 }
5552
5553 /// Similar to "send" but does not shutdown the channel if an error occurs.
5554 pub fn send_no_shutdown_on_err(
5555 self,
5556 mut result: Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>,
5557 ) -> Result<(), fidl::Error> {
5558 let _result = self.send_raw(result);
5559 self.drop_without_shutdown();
5560 _result
5561 }
5562
5563 fn send_raw(
5564 &self,
5565 mut result: Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>,
5566 ) -> Result<(), fidl::Error> {
5567 self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
5568 BufferCollectionWaitForAllBuffersAllocatedResponse,
5569 Error,
5570 >>(
5571 fidl::encoding::FlexibleResult::new(result.as_mut().map_err(|e| *e)),
5572 self.tx_id,
5573 0x62300344b61404e,
5574 fidl::encoding::DynamicFlags::FLEXIBLE,
5575 )
5576 }
5577}
5578
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionCheckAllBuffersAllocatedResponder {
    // Wrapped in ManuallyDrop so drop_without_shutdown() can destroy the
    // handle without running this type's Drop impl (which would shut the
    // channel down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request; passed back when sending the reply.
    tx_id: u32,
}
5585
/// Set the channel to be shut down (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionCheckAllBuffersAllocatedResponder {
    fn drop(&mut self) {
        // Shut down the channel first, then manually destroy the handle.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5596
impl fidl::endpoints::Responder for BufferCollectionCheckAllBuffersAllocatedResponder {
    type ControlHandle = BufferCollectionControlHandle;

    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5611
5612impl BufferCollectionCheckAllBuffersAllocatedResponder {
5613 /// Sends a response to the FIDL transaction.
5614 ///
5615 /// Sets the channel to shutdown if an error occurs.
5616 pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
5617 let _result = self.send_raw(result);
5618 if _result.is_err() {
5619 self.control_handle.shutdown();
5620 }
5621 self.drop_without_shutdown();
5622 _result
5623 }
5624
5625 /// Similar to "send" but does not shutdown the channel if an error occurs.
5626 pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
5627 let _result = self.send_raw(result);
5628 self.drop_without_shutdown();
5629 _result
5630 }
5631
5632 fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
5633 self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
5634 fidl::encoding::EmptyStruct,
5635 Error,
5636 >>(
5637 fidl::encoding::FlexibleResult::new(result),
5638 self.tx_id,
5639 0x35a5fe77ce939c10,
5640 fidl::encoding::DynamicFlags::FLEXIBLE,
5641 )
5642 }
5643}
5644
/// Zero-sized marker identifying the `BufferCollectionToken` protocol; its
/// `ProtocolMarker` impl ties together the proxy, request-stream, and
/// synchronous-proxy types.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct BufferCollectionTokenMarker;
5647
impl fidl::endpoints::ProtocolMarker for BufferCollectionTokenMarker {
    type Proxy = BufferCollectionTokenProxy;
    type RequestStream = BufferCollectionTokenRequestStream;
    // The synchronous proxy only exists on Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = BufferCollectionTokenSynchronousProxy;

    // Passed as the protocol name when constructing clients (see
    // `BufferCollectionTokenSynchronousProxy::new`).
    const DEBUG_NAME: &'static str = "(anonymous) BufferCollectionToken";
}
5656
/// Abstract client interface for the `BufferCollectionToken` protocol.
/// One-way methods return `Result<(), fidl::Error>` immediately; two-way
/// methods return a future resolving to the decoded response.
pub trait BufferCollectionTokenProxyInterface: Send + Sync {
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
    type DuplicateSyncResponseFut: std::future::Future<
        Output = Result<BufferCollectionTokenDuplicateSyncResponse, fidl::Error>,
    > + Send;
    fn r#duplicate_sync(
        &self,
        payload: &BufferCollectionTokenDuplicateSyncRequest,
    ) -> Self::DuplicateSyncResponseFut;
    fn r#duplicate(
        &self,
        payload: BufferCollectionTokenDuplicateRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_dispensable(&self) -> Result<(), fidl::Error>;
    fn r#create_buffer_collection_token_group(
        &self,
        payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
    ) -> Result<(), fidl::Error>;
}
/// Blocking (synchronous) client for `BufferCollectionToken`; only available
/// on Fuchsia targets.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct BufferCollectionTokenSynchronousProxy {
    // Synchronous FIDL client owning the channel to the server.
    client: fidl::client::sync::Client,
}
5711
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for BufferCollectionTokenSynchronousProxy {
    type Proxy = BufferCollectionTokenProxy;
    type Protocol = BufferCollectionTokenMarker;

    /// Wraps a raw channel in this proxy type (delegates to `Self::new`).
    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    /// Consumes the proxy, returning the underlying channel.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Borrows the underlying channel without giving up ownership.
    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
5729
5730#[cfg(target_os = "fuchsia")]
5731impl BufferCollectionTokenSynchronousProxy {
    /// Creates a synchronous proxy that speaks `BufferCollectionToken` over
    /// `channel`, tagging the client with the protocol's `DEBUG_NAME`.
    pub fn new(channel: fidl::Channel) -> Self {
        let protocol_name =
            <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
    }
5737
    /// Consumes the proxy, returning the underlying channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }
5741
    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    ///
    /// Blocks until an event message is received or `deadline` passes.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<BufferCollectionTokenEvent, fidl::Error> {
        BufferCollectionTokenEvent::decode(self.client.wait_for_event(deadline)?)
    }
5750
5751 /// Ensure that previous messages have been received server side. This is
5752 /// particularly useful after previous messages that created new tokens,
5753 /// because a token must be known to the sysmem server before sending the
5754 /// token to another participant.
5755 ///
5756 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
5757 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
5758 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
5759 /// to mitigate the possibility of a hostile/fake
5760 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
5761 /// Another way is to pass the token to
/// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
5763 /// the token as part of exchanging it for a
5764 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
5765 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
5766 /// of stalling.
5767 ///
5768 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
5769 /// and then starting and completing a `Sync`, it's then safe to send the
5770 /// `BufferCollectionToken` client ends to other participants knowing the
5771 /// server will recognize the tokens when they're sent by the other
5772 /// participants to sysmem in a
5773 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
5774 /// efficient way to create tokens while avoiding unnecessary round trips.
5775 ///
5776 /// Other options include waiting for each
5777 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
5778 /// individually (using separate call to `Sync` after each), or calling
5779 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
5780 /// converted to a `BufferCollection` via
5781 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
5782 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
5783 /// the sync step and can create multiple tokens at once.
    pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
        // Round-trip request with an empty payload; blocks until the reply
        // arrives or `___deadline` passes.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
        >(
            (),
            0x11ac2555cf575b54, // ordinal for BufferCollectionToken.Sync
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<BufferCollectionTokenMarker>("sync")?;
        Ok(_response)
    }
5797
5798 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
5799 ///
5800 /// Normally a participant will convert a `BufferCollectionToken` into a
5801 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
5802 /// `Release` via the token (and then close the channel immediately or
5803 /// shortly later in response to server closing the server end), which
5804 /// avoids causing buffer collection failure. Without a prior `Release`,
5805 /// closing the `BufferCollectionToken` client end will cause buffer
5806 /// collection failure.
5807 ///
5808 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
5809 ///
5810 /// By default the server handles unexpected closure of a
5811 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
5812 /// first) by failing the buffer collection. Partly this is to expedite
5813 /// closing VMO handles to reclaim memory when any participant fails. If a
5814 /// participant would like to cleanly close a `BufferCollection` without
5815 /// causing buffer collection failure, the participant can send `Release`
5816 /// before closing the `BufferCollection` client end. The `Release` can
5817 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
5818 /// buffer collection won't require constraints from this node in order to
5819 /// allocate. If after `SetConstraints`, the constraints are retained and
5820 /// aggregated, despite the lack of `BufferCollection` connection at the
5821 /// time of constraints aggregation.
5822 ///
5823 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
5824 ///
5825 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
5826 /// end (without `Release` first) will trigger failure of the buffer
5827 /// collection. To close a `BufferCollectionTokenGroup` channel without
5828 /// failing the buffer collection, ensure that AllChildrenPresent() has been
5829 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
5830 /// client end.
5831 ///
5832 /// If `Release` occurs before
/// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
5834 /// buffer collection will fail (triggered by reception of `Release` without
5835 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
5836 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
5837 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
5838 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
5839 /// close requires `AllChildrenPresent` (if not already sent), then
5840 /// `Release`, then close client end.
5841 ///
5842 /// If `Release` occurs after `AllChildrenPresent`, the children and all
5843 /// their constraints remain intact (just as they would if the
5844 /// `BufferCollectionTokenGroup` channel had remained open), and the client
5845 /// end close doesn't trigger buffer collection failure.
5846 ///
5847 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
5848 ///
5849 /// For brevity, the per-channel-protocol paragraphs above ignore the
5850 /// separate failure domain created by
5851 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
5852 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
5853 /// unexpectedly closes (without `Release` first) and that client end is
5854 /// under a failure domain, instead of failing the whole buffer collection,
5855 /// the failure domain is failed, but the buffer collection itself is
5856 /// isolated from failure of the failure domain. Such failure domains can be
5857 /// nested, in which case only the inner-most failure domain in which the
5858 /// `Node` resides fails.
    pub fn r#release(&self) -> Result<(), fidl::Error> {
        // One-way message: `send` (not `send_query`) is used, so no reply is
        // awaited.
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
5866
5867 /// Set a name for VMOs in this buffer collection.
5868 ///
5869 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
5870 /// will be truncated to fit. The name of the vmo will be suffixed with the
5871 /// buffer index within the collection (if the suffix fits within
5872 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
5873 /// listed in the inspect data.
5874 ///
5875 /// The name only affects VMOs allocated after the name is set; this call
5876 /// does not rename existing VMOs. If multiple clients set different names
5877 /// then the larger priority value will win. Setting a new name with the
5878 /// same priority as a prior name doesn't change the name.
5879 ///
5880 /// All table fields are currently required.
5881 ///
5882 /// + request `priority` The name is only set if this is the first `SetName`
5883 /// or if `priority` is greater than any previous `priority` value in
5884 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
5885 /// + request `name` The name for VMOs created under this buffer collection.
    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        // One-way message carrying the name/priority table; no reply awaited.
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
5893
5894 /// Set information about the current client that can be used by sysmem to
5895 /// help diagnose leaking memory and allocation stalls waiting for a
5896 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
5897 ///
5898 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
/// `Node`(s) derived from this `Node`, unless overridden by
5900 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
5901 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
5902 ///
5903 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
5904 /// `Allocator` is the most efficient way to ensure that all
5905 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
5906 /// set, and is also more efficient than separately sending the same debug
5907 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
5908 /// created [`fuchsia.sysmem2/Node`].
5909 ///
5910 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
5911 /// indicate which client is closing their channel first, leading to subtree
5912 /// failure (which can be normal if the purpose of the subtree is over, but
5913 /// if happening earlier than expected, the client-channel-specific name can
5914 /// help diagnose where the failure is first coming from, from sysmem's
5915 /// point of view).
5916 ///
5917 /// All table fields are currently required.
5918 ///
5919 /// + request `name` This can be an arbitrary string, but the current
5920 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
5921 /// + request `id` This can be an arbitrary id, but the current process ID
5922 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message; no reply awaited.
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
5933
5934 /// Sysmem logs a warning if sysmem hasn't seen
5935 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
5936 /// within 5 seconds after creation of a new collection.
5937 ///
5938 /// Clients can call this method to change when the log is printed. If
5939 /// multiple client set the deadline, it's unspecified which deadline will
5940 /// take effect.
5941 ///
5942 /// In most cases the default works well.
5943 ///
5944 /// All table fields are currently required.
5945 ///
5946 /// + request `deadline` The time at which sysmem will start trying to log
5947 /// the warning, unless all constraints are with sysmem by then.
    pub fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message; no reply awaited.
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
5958
5959 /// This enables verbose logging for the buffer collection.
5960 ///
5961 /// Verbose logging includes constraints set via
5962 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
5963 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
5964 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
5965 /// the tree of `Node`(s).
5966 ///
5967 /// Normally sysmem prints only a single line complaint when aggregation
5968 /// fails, with just the specific detailed reason that aggregation failed,
5969 /// with little surrounding context. While this is often enough to diagnose
5970 /// a problem if only a small change was made and everything was working
5971 /// before the small change, it's often not particularly helpful for getting
5972 /// a new buffer collection to work for the first time. Especially with
5973 /// more complex trees of nodes, involving things like
5974 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
5975 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
5976 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
5977 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
5978 /// looks like and why it's failing a logical allocation, or why a tree or
5979 /// subtree is failing sooner than expected.
5980 ///
5981 /// The intent of the extra logging is to be acceptable from a performance
5982 /// point of view, under the assumption that verbose logging is only enabled
5983 /// on a low number of buffer collections. If we're not tracking down a bug,
5984 /// we shouldn't send this message.
    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        // One-way message with an empty payload; no reply awaited.
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
5992
5993 /// This gets a handle that can be used as a parameter to
5994 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
5995 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
5996 /// client obtained this handle from this `Node`.
5997 ///
5998 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
5999 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
6000 /// despite the two calls typically being on different channels.
6001 ///
6002 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
6003 ///
6004 /// All table fields are currently required.
6005 ///
6006 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
6007 /// different `Node` channel, to prove that the client obtained the handle
6008 /// from this `Node`.
    pub fn r#get_node_ref(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
        // Round-trip query; blocks until the reply arrives or `___deadline`
        // passes.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
        >(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<BufferCollectionTokenMarker>("get_node_ref")?;
        Ok(_response)
    }
6025
6026 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
6027 /// rooted at a different child token of a common parent
6028 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
6029 /// passed-in `node_ref`.
6030 ///
6031 /// This call is for assisting with admission control de-duplication, and
6032 /// with debugging.
6033 ///
6034 /// The `node_ref` must be obtained using
6035 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
6036 ///
6037 /// The `node_ref` can be a duplicated handle; it's not necessary to call
6038 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
6039 ///
6040 /// If a calling token may not actually be a valid token at all due to a
6041 /// potentially hostile/untrusted provider of the token, call
6042 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
6043 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
6044 /// never responds due to a calling token not being a real token (not really
6045 /// talking to sysmem). Another option is to call
6046 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
6047 /// which also validates the token along with converting it to a
6048 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
6049 ///
6050 /// All table fields are currently required.
6051 ///
6052 /// - response `is_alternate`
6053 /// - true: The first parent node in common between the calling node and
6054 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
6055 /// that the calling `Node` and the `node_ref` `Node` will not have both
6056 /// their constraints apply - rather sysmem will choose one or the other
6057 /// of the constraints - never both. This is because only one child of
6058 /// a `BufferCollectionTokenGroup` is selected during logical
6059 /// allocation, with only that one child's subtree contributing to
6060 /// constraints aggregation.
6061 /// - false: The first parent node in common between the calling `Node`
6062 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
6063 /// Currently, this means the first parent node in common is a
6064 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
6065 /// `Release`ed). This means that the calling `Node` and the `node_ref`
6066 /// `Node` may have both their constraints apply during constraints
6067 /// aggregation of the logical allocation, if both `Node`(s) are
6068 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
6069 /// this case, there is no `BufferCollectionTokenGroup` that will
6070 /// directly prevent the two `Node`(s) from both being selected and
6071 /// their constraints both aggregated, but even when false, one or both
6072 /// `Node`(s) may still be eliminated from consideration if one or both
6073 /// `Node`(s) has a direct or indirect parent
6074 /// `BufferCollectionTokenGroup` which selects a child subtree other
6075 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
6076 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
6077 /// associated with the same buffer collection as the calling `Node`.
6078 /// Another reason for this error is if the `node_ref` is an
6079 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
6080 /// a real `node_ref` obtained from `GetNodeRef`.
6081 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
/// `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
6083 /// the needed rights expected on a real `node_ref`.
6084 /// * No other failing status codes are returned by this call. However,
6085 /// sysmem may add additional codes in future, so the client should have
6086 /// sensible default handling for any failing status code.
    pub fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeIsAlternateForResult, fidl::Error> {
        // Round-trip query; the flexible result type decodes either the
        // success table or the domain `Error`.
        let _response = self.client.send_query::<
            NodeIsAlternateForRequest,
            fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
        >(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<BufferCollectionTokenMarker>("is_alternate_for")?;
        // The map is an identity; retained as generated.
        Ok(_response.map(|x| x))
    }
6104
6105 /// Get the buffer collection ID. This ID is also available from
6106 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
6107 /// within the collection).
6108 ///
6109 /// This call is mainly useful in situations where we can't convey a
6110 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
6111 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
6112 /// handle, which can be joined back up with a `BufferCollection` client end
6113 /// that was created via a different path. Prefer to convey a
6114 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
6115 ///
6116 /// Trusting a `buffer_collection_id` value from a source other than sysmem
6117 /// is analogous to trusting a koid value from a source other than zircon.
6118 /// Both should be avoided unless really necessary, and both require
6119 /// caution. In some situations it may be reasonable to refer to a
6120 /// pre-established `BufferCollection` by `buffer_collection_id` via a
6121 /// protocol for efficiency reasons, but an incoming value purporting to be
6122 /// a `buffer_collection_id` is not sufficient alone to justify granting the
6123 /// sender of the `buffer_collection_id` any capability. The sender must
6124 /// first prove to a receiver that the sender has/had a VMO or has/had a
6125 /// `BufferCollectionToken` to the same collection by sending a handle that
6126 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
6127 /// `buffer_collection_id` value. The receiver should take care to avoid
6128 /// assuming that a sender had a `BufferCollectionToken` in cases where the
6129 /// sender has only proven that the sender had a VMO.
6130 ///
6131 /// - response `buffer_collection_id` This ID is unique per buffer
6132 /// collection per boot. Each buffer is uniquely identified by the
6133 /// `buffer_collection_id` and `buffer_index` together.
6134 pub fn r#get_buffer_collection_id(
6135 &self,
6136 ___deadline: zx::MonotonicInstant,
6137 ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
6138 let _response = self.client.send_query::<
6139 fidl::encoding::EmptyPayload,
6140 fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
6141 >(
6142 (),
6143 0x77d19a494b78ba8c,
6144 fidl::encoding::DynamicFlags::FLEXIBLE,
6145 ___deadline,
6146 )?
6147 .into_result::<BufferCollectionTokenMarker>("get_buffer_collection_id")?;
6148 Ok(_response)
6149 }
6150
6151 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
6152 /// created after this message to weak, which means that a client's `Node`
6153 /// client end (or a child created after this message) is not alone
6154 /// sufficient to keep allocated VMOs alive.
6155 ///
6156 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
6157 /// `close_weak_asap`.
6158 ///
6159 /// This message is only permitted before the `Node` becomes ready for
6160 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
6161 /// * `BufferCollectionToken`: any time
6162 /// * `BufferCollection`: before `SetConstraints`
6163 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
6164 ///
6165 /// Currently, no conversion from strong `Node` to weak `Node` after ready
6166 /// for allocation is provided, but a client can simulate that by creating
6167 /// an additional `Node` before allocation and setting that additional
6168 /// `Node` to weak, and then potentially at some point later sending
6169 /// `Release` and closing the client end of the client's strong `Node`, but
6170 /// keeping the client's weak `Node`.
6171 ///
6172 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
6173 /// collection failure (all `Node` client end(s) will see
6174 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
6175 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
6176 /// this situation until all `Node`(s) are ready for allocation. For initial
6177 /// allocation to succeed, at least one strong `Node` is required to exist
6178 /// at allocation time, but after that client receives VMO handles, that
6179 /// client can `BufferCollection.Release` and close the client end without
6180 /// causing this type of failure.
6181 ///
6182 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
6183 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
6184 /// separately as appropriate.
6185 pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
6186 self.client.send::<fidl::encoding::EmptyPayload>(
6187 (),
6188 0x22dd3ea514eeffe1,
6189 fidl::encoding::DynamicFlags::FLEXIBLE,
6190 )
6191 }
6192
6193 /// This indicates to sysmem that the client is prepared to pay attention to
6194 /// `close_weak_asap`.
6195 ///
6196 /// If sent, this message must be before
6197 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
6198 ///
6199 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
6200 /// send this message before `WaitForAllBuffersAllocated`, or a parent
6201 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
6202 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
6203 /// trigger buffer collection failure.
6204 ///
6205 /// This message is necessary because weak sysmem VMOs have not always been
6206 /// a thing, so older clients are not aware of the need to pay attention to
6207 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
6208 /// sysmem weak VMO handles asap. By having this message and requiring
6209 /// participants to indicate their acceptance of this aspect of the overall
6210 /// protocol, we avoid situations where an older client is delivered a weak
6211 /// VMO without any way for sysmem to get that VMO to close quickly later
6212 /// (and on a per-buffer basis).
6213 ///
6214 /// A participant that doesn't handle `close_weak_asap` and also doesn't
6215 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
6216 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
6217 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
6218 /// same participant has a child/delegate which does retrieve VMOs, that
6219 /// child/delegate will need to send `SetWeakOk` before
6220 /// `WaitForAllBuffersAllocated`.
6221 ///
6222 /// + request `for_child_nodes_also` If present and true, this means direct
6223 /// child nodes of this node created after this message plus all
6224 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
6225 /// those nodes. Any child node of this node that was created before this
6226 /// message is not included. This setting is "sticky" in the sense that a
6227 /// subsequent `SetWeakOk` without this bool set to true does not reset
6228 /// the server-side bool. If this creates a problem for a participant, a
6229 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
6230 /// tokens instead, as appropriate. A participant should only set
6231 /// `for_child_nodes_also` true if the participant can really promise to
6232 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
6233 /// weak VMO handles held by participants holding the corresponding child
6234 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
6235 /// which are using sysmem(1) can be weak, despite the clients of those
6236 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
6237 /// direct way to find out about `close_weak_asap`. This only applies to
6238 /// descendents of this `Node` which are using sysmem(1), not to this
6239 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
6240 /// token, which will fail allocation unless an ancestor of this `Node`
6241 /// specified `for_child_nodes_also` true.
6242 pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
6243 self.client.send::<NodeSetWeakOkRequest>(
6244 &mut payload,
6245 0x38a44fc4d7724be9,
6246 fidl::encoding::DynamicFlags::FLEXIBLE,
6247 )
6248 }
6249
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
6252 /// reservation by a different `Node` via
6253 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
6254 ///
6255 /// The `Node` buffer counts may not be released until the entire tree of
6256 /// `Node`(s) is closed or failed, because
6257 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
6258 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
6259 /// `Node` buffer counts remain reserved until the orphaned node is later
6260 /// cleaned up.
6261 ///
6262 /// If the `Node` exceeds a fairly large number of attached eventpair server
6263 /// ends, a log message will indicate this and the `Node` (and the
6264 /// appropriate) sub-tree will fail.
6265 ///
6266 /// The `server_end` will remain open when
6267 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
6268 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
6269 /// [`fuchsia.sysmem2/BufferCollection`].
6270 ///
6271 /// This message can also be used with a
6272 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
6273 pub fn r#attach_node_tracking(
6274 &self,
6275 mut payload: NodeAttachNodeTrackingRequest,
6276 ) -> Result<(), fidl::Error> {
6277 self.client.send::<NodeAttachNodeTrackingRequest>(
6278 &mut payload,
6279 0x3f22f2a293d3cdac,
6280 fidl::encoding::DynamicFlags::FLEXIBLE,
6281 )
6282 }
6283
6284 /// Create additional [`fuchsia.sysmem2/BufferCollectionToken`](s) from this
6285 /// one, referring to the same buffer collection.
6286 ///
6287 /// The created tokens are children of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
6289 ///
6290 /// This method can be used to add more participants, by transferring the
6291 /// newly created tokens to additional participants.
6292 ///
6293 /// A new token will be returned for each entry in the
6294 /// `rights_attenuation_masks` array.
6295 ///
6296 /// If the called token may not actually be a valid token due to a
6297 /// potentially hostile/untrusted provider of the token, consider using
6298 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
6299 /// instead of potentially getting stuck indefinitely if
6300 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] never responds
6301 /// due to the calling token not being a real token.
6302 ///
6303 /// In contrast to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], no
6304 /// separate [`fuchsia.sysmem2/Node.Sync`] is needed after calling this
6305 /// method, because the sync step is included in this call, at the cost of a
6306 /// round trip during this call.
6307 ///
6308 /// All tokens must be turned in to sysmem via
6309 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
6310 /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
6311 /// successfully allocate buffers (or to logically allocate buffers in the
6312 /// case of subtrees involving
6313 /// [`fuchsia.sysmem2/BufferCollectionToken.AttachToken`]).
6314 ///
6315 /// All table fields are currently required.
6316 ///
6317 /// + request `rights_attenuation_mask` In each entry of
6318 /// `rights_attenuation_masks`, rights bits that are zero will be absent
6319 /// in the buffer VMO rights obtainable via the corresponding returned
6320 /// token. This allows an initiator or intermediary participant to
6321 /// attenuate the rights available to a participant. This does not allow a
6322 /// participant to gain rights that the participant doesn't already have.
6323 /// The value `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no
6324 /// attenuation should be applied.
6325 /// - response `tokens` The client ends of each newly created token.
6326 pub fn r#duplicate_sync(
6327 &self,
6328 mut payload: &BufferCollectionTokenDuplicateSyncRequest,
6329 ___deadline: zx::MonotonicInstant,
6330 ) -> Result<BufferCollectionTokenDuplicateSyncResponse, fidl::Error> {
6331 let _response = self.client.send_query::<
6332 BufferCollectionTokenDuplicateSyncRequest,
6333 fidl::encoding::FlexibleType<BufferCollectionTokenDuplicateSyncResponse>,
6334 >(
6335 payload,
6336 0x1c1af9919d1ca45c,
6337 fidl::encoding::DynamicFlags::FLEXIBLE,
6338 ___deadline,
6339 )?
6340 .into_result::<BufferCollectionTokenMarker>("duplicate_sync")?;
6341 Ok(_response)
6342 }
6343
6344 /// Create an additional [`fuchsia.sysmem2/BufferCollectionToken`] from this
6345 /// one, referring to the same buffer collection.
6346 ///
6347 /// The created token is a child of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
6349 ///
6350 /// This method can be used to add a participant, by transferring the newly
6351 /// created token to another participant.
6352 ///
6353 /// This one-way message can be used instead of the two-way
6354 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] FIDL call in
    /// performance-sensitive cases where it would be undesirable to wait for
6356 /// sysmem to respond to
6357 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or when the
6358 /// client code isn't structured to make it easy to duplicate all the needed
6359 /// tokens at once.
6360 ///
6361 /// After sending one or more `Duplicate` messages, and before sending the
6362 /// newly created child tokens to other participants (or to other
6363 /// [`fuchsia.sysmem2/Allocator`] channels), the client must send a
6364 /// [`fuchsia.sysmem2/Node.Sync`] and wait for the `Sync` response. The
6365 /// `Sync` call can be made on the token, or on the `BufferCollection`
6366 /// obtained by passing this token to `BindSharedCollection`. Either will
6367 /// ensure that the server knows about the tokens created via `Duplicate`
6368 /// before the other participant sends the token to the server via separate
6369 /// `Allocator` channel.
6370 ///
6371 /// All tokens must be turned in via
6372 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
6373 /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
6374 /// successfully allocate buffers.
6375 ///
6376 /// All table fields are currently required.
6377 ///
6378 /// + request `rights_attenuation_mask` The rights bits that are zero in
6379 /// this mask will be absent in the buffer VMO rights obtainable via the
6380 /// client end of `token_request`. This allows an initiator or
6381 /// intermediary participant to attenuate the rights available to a
6382 /// delegate participant. This does not allow a participant to gain rights
6383 /// that the participant doesn't already have. The value
6384 /// `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no attenuation
6385 /// should be applied.
6386 /// + These values for rights_attenuation_mask result in no attenuation:
6387 /// + `ZX_RIGHT_SAME_RIGHTS` (preferred)
6388 /// + 0xFFFFFFFF (this is reasonable when an attenuation mask is
6389 /// computed)
6390 /// + 0 (deprecated - do not use 0 - an ERROR will go to the log)
6391 /// + request `token_request` is the server end of a `BufferCollectionToken`
6392 /// channel. The client end of this channel acts as another participant in
6393 /// the shared buffer collection.
6394 pub fn r#duplicate(
6395 &self,
6396 mut payload: BufferCollectionTokenDuplicateRequest,
6397 ) -> Result<(), fidl::Error> {
6398 self.client.send::<BufferCollectionTokenDuplicateRequest>(
6399 &mut payload,
6400 0x73e78f92ee7fb887,
6401 fidl::encoding::DynamicFlags::FLEXIBLE,
6402 )
6403 }
6404
6405 /// Set this [`fuchsia.sysmem2/BufferCollectionToken`] to dispensable.
6406 ///
6407 /// When the `BufferCollectionToken` is converted to a
6408 /// [`fuchsia.sysmem2/BufferCollection`], the dispensable status applies to
6409 /// the `BufferCollection` also.
6410 ///
6411 /// Normally, if a client closes a [`fuchsia.sysmem2/BufferCollection`]
6412 /// client end without having sent
6413 /// [`fuchsia.sysmem2/BufferCollection.Release`] first, the
    /// `BufferCollection` [`fuchsia.sysmem2/Node`] will fail, which also
6415 /// propagates failure to the parent [`fuchsia.sysmem2/Node`] and so on up
6416 /// to the root `Node`, which fails the whole buffer collection. In
6417 /// contrast, a dispensable `Node` can fail after buffers are allocated
6418 /// without causing failure of its parent in the [`fuchsia.sysmem2/Node`]
    /// hierarchy.
6420 ///
6421 /// The dispensable `Node` participates in constraints aggregation along
6422 /// with its parent before buffer allocation. If the dispensable `Node`
6423 /// fails before buffers are allocated, the failure propagates to the
6424 /// dispensable `Node`'s parent.
6425 ///
6426 /// After buffers are allocated, failure of the dispensable `Node` (or any
6427 /// child of the dispensable `Node`) does not propagate to the dispensable
6428 /// `Node`'s parent. Failure does propagate from a normal child of a
6429 /// dispensable `Node` to the dispensable `Node`. Failure of a child is
6430 /// blocked from reaching its parent if the child is attached using
6431 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or if the child is
6432 /// dispensable and the failure occurred after allocation.
6433 ///
6434 /// A dispensable `Node` can be used in cases where a participant needs to
6435 /// provide constraints, but after buffers are allocated, the participant
6436 /// can fail without causing buffer collection failure from the parent
6437 /// `Node`'s point of view.
6438 ///
6439 /// In contrast, `BufferCollection.AttachToken` can be used to create a
6440 /// `BufferCollectionToken` which does not participate in constraints
6441 /// aggregation with its parent `Node`, and whose failure at any time does
6442 /// not propagate to its parent `Node`, and whose potential delay providing
6443 /// constraints does not prevent the parent `Node` from completing its
6444 /// buffer allocation.
6445 ///
6446 /// An initiator (creator of the root `Node` using
6447 /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`]) may in some
6448 /// scenarios choose to initially use a dispensable `Node` for a first
6449 /// instance of a participant, and then later if the first instance of that
    /// participant fails, a new second instance of that participant may be given
6451 /// a `BufferCollectionToken` created with `AttachToken`.
6452 ///
6453 /// Normally a client will `SetDispensable` on a `BufferCollectionToken`
6454 /// shortly before sending the dispensable `BufferCollectionToken` to a
6455 /// delegate participant. Because `SetDispensable` prevents propagation of
6456 /// child `Node` failure to parent `Node`(s), if the client was relying on
6457 /// noticing child failure via failure of the parent `Node` retained by the
6458 /// client, the client may instead need to notice failure via other means.
6459 /// If other means aren't available/convenient, the client can instead
6460 /// retain the dispensable `Node` and create a child `Node` under that to
6461 /// send to the delegate participant, retaining this `Node` in order to
6462 /// notice failure of the subtree rooted at this `Node` via this `Node`'s
6463 /// ZX_CHANNEL_PEER_CLOSED signal, and take whatever action is appropriate
6464 /// (e.g. starting a new instance of the delegate participant and handing it
6465 /// a `BufferCollectionToken` created using
6466 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or propagate failure
6467 /// and clean up in a client-specific way).
6468 ///
6469 /// While it is possible (and potentially useful) to `SetDispensable` on a
6470 /// direct child of a `BufferCollectionTokenGroup` `Node`, it isn't possible
6471 /// to later replace a failed dispensable `Node` that was a direct child of
6472 /// a `BufferCollectionTokenGroup` with a new token using `AttachToken`
6473 /// (since there's no `AttachToken` on a group). Instead, to enable
6474 /// `AttachToken` replacement in this case, create an additional
6475 /// non-dispensable token that's a direct child of the group and make the
6476 /// existing dispensable token a child of the additional token. This way,
6477 /// the additional token that is a direct child of the group has
6478 /// `BufferCollection.AttachToken` which can be used to replace the failed
6479 /// dispensable token.
6480 ///
6481 /// `SetDispensable` on an already-dispensable token is idempotent.
6482 pub fn r#set_dispensable(&self) -> Result<(), fidl::Error> {
6483 self.client.send::<fidl::encoding::EmptyPayload>(
6484 (),
6485 0x228acf979254df8b,
6486 fidl::encoding::DynamicFlags::FLEXIBLE,
6487 )
6488 }
6489
6490 /// Create a logical OR among a set of tokens, called a
6491 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
6492 ///
6493 /// Most sysmem clients and many participants don't need to care about this
6494 /// message or about `BufferCollectionTokenGroup`(s). However, in some cases
6495 /// a participant wants to attempt to include one set of delegate
6496 /// participants, but if constraints don't combine successfully that way,
6497 /// fall back to a different (possibly overlapping) set of delegate
6498 /// participants, and/or fall back to a less demanding strategy (in terms of
    /// how strict the [`fuchsia.sysmem2/BufferCollectionConstraints`] are,
6500 /// across all involved delegate participants). In such cases, a
6501 /// `BufferCollectionTokenGroup` is useful.
6502 ///
6503 /// A `BufferCollectionTokenGroup` is used to create a 1 of N OR among N
6504 /// child [`fuchsia.sysmem2/BufferCollectionToken`](s). The child tokens
6505 /// which are not selected during aggregation will fail (close), which a
6506 /// potential participant should notice when their `BufferCollection`
6507 /// channel client endpoint sees PEER_CLOSED, allowing the participant to
6508 /// clean up the speculative usage that didn't end up happening (this is
    /// similar to a normal `BufferCollection` server end closing on failure to
6510 /// allocate a logical buffer collection or later async failure of a buffer
6511 /// collection).
6512 ///
6513 /// See comments on protocol `BufferCollectionTokenGroup`.
6514 ///
6515 /// Any `rights_attenuation_mask` or `AttachToken`/`SetDispensable` to be
6516 /// applied to the whole group can be achieved with a
6517 /// `BufferCollectionToken` for this purpose as a direct parent of the
6518 /// `BufferCollectionTokenGroup`.
6519 ///
6520 /// All table fields are currently required.
6521 ///
6522 /// + request `group_request` The server end of a
6523 /// `BufferCollectionTokenGroup` channel to be served by sysmem.
6524 pub fn r#create_buffer_collection_token_group(
6525 &self,
6526 mut payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
6527 ) -> Result<(), fidl::Error> {
6528 self.client.send::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(
6529 &mut payload,
6530 0x30f8d48e77bd36f2,
6531 fidl::encoding::DynamicFlags::FLEXIBLE,
6532 )
6533 }
6534}
6535
#[cfg(target_os = "fuchsia")]
impl From<BufferCollectionTokenSynchronousProxy> for zx::NullableHandle {
    /// Consumes the proxy and yields its underlying channel as a handle.
    fn from(value: BufferCollectionTokenSynchronousProxy) -> Self {
        let channel = value.into_channel();
        Self::from(channel)
    }
}
6542
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for BufferCollectionTokenSynchronousProxy {
    /// Wraps a raw channel in a synchronous proxy.
    fn from(value: fidl::Channel) -> Self {
        BufferCollectionTokenSynchronousProxy::new(value)
    }
}
6549
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for BufferCollectionTokenSynchronousProxy {
    type Protocol = BufferCollectionTokenMarker;

    /// Builds a synchronous proxy from a typed client endpoint.
    fn from_client(value: fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>) -> Self {
        let channel = value.into_channel();
        Self::new(channel)
    }
}
6558
/// Asynchronous client-side proxy for the
/// `fuchsia.sysmem2/BufferCollectionToken` protocol.
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenProxy {
    // Async FIDL client that encodes/decodes messages over the channel.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
6563
6564impl fidl::endpoints::Proxy for BufferCollectionTokenProxy {
6565 type Protocol = BufferCollectionTokenMarker;
6566
6567 fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
6568 Self::new(inner)
6569 }
6570
6571 fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
6572 self.client.into_channel().map_err(|client| Self { client })
6573 }
6574
6575 fn as_channel(&self) -> &::fidl::AsyncChannel {
6576 self.client.as_channel()
6577 }
6578}
6579
6580impl BufferCollectionTokenProxy {
6581 /// Create a new Proxy for fuchsia.sysmem2/BufferCollectionToken.
6582 pub fn new(channel: ::fidl::AsyncChannel) -> Self {
6583 let protocol_name =
6584 <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
6585 Self { client: fidl::client::Client::new(channel, protocol_name) }
6586 }
6587
6588 /// Get a Stream of events from the remote end of the protocol.
6589 ///
6590 /// # Panics
6591 ///
6592 /// Panics if the event stream was already taken.
6593 pub fn take_event_stream(&self) -> BufferCollectionTokenEventStream {
6594 BufferCollectionTokenEventStream { event_receiver: self.client.take_event_receiver() }
6595 }
6596
6597 /// Ensure that previous messages have been received server side. This is
6598 /// particularly useful after previous messages that created new tokens,
6599 /// because a token must be known to the sysmem server before sending the
6600 /// token to another participant.
6601 ///
6602 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
6603 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
6604 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
6605 /// to mitigate the possibility of a hostile/fake
6606 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
6607 /// Another way is to pass the token to
6608 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
6609 /// the token as part of exchanging it for a
6610 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
6611 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
6612 /// of stalling.
6613 ///
6614 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
6615 /// and then starting and completing a `Sync`, it's then safe to send the
6616 /// `BufferCollectionToken` client ends to other participants knowing the
6617 /// server will recognize the tokens when they're sent by the other
6618 /// participants to sysmem in a
6619 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
6620 /// efficient way to create tokens while avoiding unnecessary round trips.
6621 ///
6622 /// Other options include waiting for each
6623 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
6624 /// individually (using separate call to `Sync` after each), or calling
6625 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
6626 /// converted to a `BufferCollection` via
6627 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
6628 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
6629 /// the sync step and can create multiple tokens at once.
6630 pub fn r#sync(
6631 &self,
6632 ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
6633 BufferCollectionTokenProxyInterface::r#sync(self)
6634 }
6635
6636 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
6637 ///
6638 /// Normally a participant will convert a `BufferCollectionToken` into a
6639 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
6640 /// `Release` via the token (and then close the channel immediately or
6641 /// shortly later in response to server closing the server end), which
6642 /// avoids causing buffer collection failure. Without a prior `Release`,
6643 /// closing the `BufferCollectionToken` client end will cause buffer
6644 /// collection failure.
6645 ///
6646 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
6647 ///
6648 /// By default the server handles unexpected closure of a
6649 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
6650 /// first) by failing the buffer collection. Partly this is to expedite
6651 /// closing VMO handles to reclaim memory when any participant fails. If a
6652 /// participant would like to cleanly close a `BufferCollection` without
6653 /// causing buffer collection failure, the participant can send `Release`
6654 /// before closing the `BufferCollection` client end. The `Release` can
6655 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
6656 /// buffer collection won't require constraints from this node in order to
6657 /// allocate. If after `SetConstraints`, the constraints are retained and
6658 /// aggregated, despite the lack of `BufferCollection` connection at the
6659 /// time of constraints aggregation.
6660 ///
6661 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
6662 ///
6663 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
6664 /// end (without `Release` first) will trigger failure of the buffer
6665 /// collection. To close a `BufferCollectionTokenGroup` channel without
6666 /// failing the buffer collection, ensure that AllChildrenPresent() has been
6667 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
6668 /// client end.
6669 ///
6670 /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
6672 /// buffer collection will fail (triggered by reception of `Release` without
6673 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
6674 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
6675 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
6676 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
6677 /// close requires `AllChildrenPresent` (if not already sent), then
6678 /// `Release`, then close client end.
6679 ///
6680 /// If `Release` occurs after `AllChildrenPresent`, the children and all
6681 /// their constraints remain intact (just as they would if the
6682 /// `BufferCollectionTokenGroup` channel had remained open), and the client
6683 /// end close doesn't trigger buffer collection failure.
6684 ///
6685 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
6686 ///
6687 /// For brevity, the per-channel-protocol paragraphs above ignore the
6688 /// separate failure domain created by
6689 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
6690 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
6691 /// unexpectedly closes (without `Release` first) and that client end is
6692 /// under a failure domain, instead of failing the whole buffer collection,
6693 /// the failure domain is failed, but the buffer collection itself is
6694 /// isolated from failure of the failure domain. Such failure domains can be
6695 /// nested, in which case only the inner-most failure domain in which the
6696 /// `Node` resides fails.
6697 pub fn r#release(&self) -> Result<(), fidl::Error> {
6698 BufferCollectionTokenProxyInterface::r#release(self)
6699 }
6700
6701 /// Set a name for VMOs in this buffer collection.
6702 ///
6703 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
6704 /// will be truncated to fit. The name of the vmo will be suffixed with the
6705 /// buffer index within the collection (if the suffix fits within
6706 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
6707 /// listed in the inspect data.
6708 ///
6709 /// The name only affects VMOs allocated after the name is set; this call
6710 /// does not rename existing VMOs. If multiple clients set different names
6711 /// then the larger priority value will win. Setting a new name with the
6712 /// same priority as a prior name doesn't change the name.
6713 ///
6714 /// All table fields are currently required.
6715 ///
6716 /// + request `priority` The name is only set if this is the first `SetName`
6717 /// or if `priority` is greater than any previous `priority` value in
6718 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
6719 /// + request `name` The name for VMOs created under this buffer collection.
6720 pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
6721 BufferCollectionTokenProxyInterface::r#set_name(self, payload)
6722 }
6723
6724 /// Set information about the current client that can be used by sysmem to
6725 /// help diagnose leaking memory and allocation stalls waiting for a
6726 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
6727 ///
6728 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
6730 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
6731 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
6732 ///
6733 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
6734 /// `Allocator` is the most efficient way to ensure that all
6735 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
6736 /// set, and is also more efficient than separately sending the same debug
6737 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
6738 /// created [`fuchsia.sysmem2/Node`].
6739 ///
6740 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
6741 /// indicate which client is closing their channel first, leading to subtree
6742 /// failure (which can be normal if the purpose of the subtree is over, but
6743 /// if happening earlier than expected, the client-channel-specific name can
6744 /// help diagnose where the failure is first coming from, from sysmem's
6745 /// point of view).
6746 ///
6747 /// All table fields are currently required.
6748 ///
6749 /// + request `name` This can be an arbitrary string, but the current
6750 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
6751 /// + request `id` This can be an arbitrary id, but the current process ID
6752 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
6753 pub fn r#set_debug_client_info(
6754 &self,
6755 mut payload: &NodeSetDebugClientInfoRequest,
6756 ) -> Result<(), fidl::Error> {
6757 BufferCollectionTokenProxyInterface::r#set_debug_client_info(self, payload)
6758 }
6759
6760 /// Sysmem logs a warning if sysmem hasn't seen
6761 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
6762 /// within 5 seconds after creation of a new collection.
6763 ///
6764 /// Clients can call this method to change when the log is printed. If
6765 /// multiple client set the deadline, it's unspecified which deadline will
6766 /// take effect.
6767 ///
6768 /// In most cases the default works well.
6769 ///
6770 /// All table fields are currently required.
6771 ///
6772 /// + request `deadline` The time at which sysmem will start trying to log
6773 /// the warning, unless all constraints are with sysmem by then.
6774 pub fn r#set_debug_timeout_log_deadline(
6775 &self,
6776 mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
6777 ) -> Result<(), fidl::Error> {
6778 BufferCollectionTokenProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
6779 }
6780
6781 /// This enables verbose logging for the buffer collection.
6782 ///
6783 /// Verbose logging includes constraints set via
6784 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
6785 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
6786 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
6787 /// the tree of `Node`(s).
6788 ///
6789 /// Normally sysmem prints only a single line complaint when aggregation
6790 /// fails, with just the specific detailed reason that aggregation failed,
6791 /// with little surrounding context. While this is often enough to diagnose
6792 /// a problem if only a small change was made and everything was working
6793 /// before the small change, it's often not particularly helpful for getting
6794 /// a new buffer collection to work for the first time. Especially with
6795 /// more complex trees of nodes, involving things like
6796 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
6797 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
6798 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
6799 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
6800 /// looks like and why it's failing a logical allocation, or why a tree or
6801 /// subtree is failing sooner than expected.
6802 ///
6803 /// The intent of the extra logging is to be acceptable from a performance
6804 /// point of view, under the assumption that verbose logging is only enabled
6805 /// on a low number of buffer collections. If we're not tracking down a bug,
6806 /// we shouldn't send this message.
6807 pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
6808 BufferCollectionTokenProxyInterface::r#set_verbose_logging(self)
6809 }
6810
6811 /// This gets a handle that can be used as a parameter to
6812 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
6813 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
6814 /// client obtained this handle from this `Node`.
6815 ///
6816 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
6817 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
6818 /// despite the two calls typically being on different channels.
6819 ///
6820 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
6821 ///
6822 /// All table fields are currently required.
6823 ///
6824 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
6825 /// different `Node` channel, to prove that the client obtained the handle
6826 /// from this `Node`.
6827 pub fn r#get_node_ref(
6828 &self,
6829 ) -> fidl::client::QueryResponseFut<
6830 NodeGetNodeRefResponse,
6831 fidl::encoding::DefaultFuchsiaResourceDialect,
6832 > {
6833 BufferCollectionTokenProxyInterface::r#get_node_ref(self)
6834 }
6835
6836 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
6837 /// rooted at a different child token of a common parent
6838 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
6839 /// passed-in `node_ref`.
6840 ///
6841 /// This call is for assisting with admission control de-duplication, and
6842 /// with debugging.
6843 ///
6844 /// The `node_ref` must be obtained using
6845 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
6846 ///
6847 /// The `node_ref` can be a duplicated handle; it's not necessary to call
6848 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
6849 ///
6850 /// If a calling token may not actually be a valid token at all due to a
6851 /// potentially hostile/untrusted provider of the token, call
6852 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
6853 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
6854 /// never responds due to a calling token not being a real token (not really
6855 /// talking to sysmem). Another option is to call
6856 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
6857 /// which also validates the token along with converting it to a
6858 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
6859 ///
6860 /// All table fields are currently required.
6861 ///
6862 /// - response `is_alternate`
6863 /// - true: The first parent node in common between the calling node and
6864 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
6865 /// that the calling `Node` and the `node_ref` `Node` will not have both
6866 /// their constraints apply - rather sysmem will choose one or the other
6867 /// of the constraints - never both. This is because only one child of
6868 /// a `BufferCollectionTokenGroup` is selected during logical
6869 /// allocation, with only that one child's subtree contributing to
6870 /// constraints aggregation.
6871 /// - false: The first parent node in common between the calling `Node`
6872 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
6873 /// Currently, this means the first parent node in common is a
6874 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
6875 /// `Release`ed). This means that the calling `Node` and the `node_ref`
6876 /// `Node` may have both their constraints apply during constraints
6877 /// aggregation of the logical allocation, if both `Node`(s) are
6878 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
6879 /// this case, there is no `BufferCollectionTokenGroup` that will
6880 /// directly prevent the two `Node`(s) from both being selected and
6881 /// their constraints both aggregated, but even when false, one or both
6882 /// `Node`(s) may still be eliminated from consideration if one or both
6883 /// `Node`(s) has a direct or indirect parent
6884 /// `BufferCollectionTokenGroup` which selects a child subtree other
6885 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
6886 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
6887 /// associated with the same buffer collection as the calling `Node`.
6888 /// Another reason for this error is if the `node_ref` is an
6889 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
6890 /// a real `node_ref` obtained from `GetNodeRef`.
6891 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    /// `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
6893 /// the needed rights expected on a real `node_ref`.
6894 /// * No other failing status codes are returned by this call. However,
6895 /// sysmem may add additional codes in future, so the client should have
6896 /// sensible default handling for any failing status code.
6897 pub fn r#is_alternate_for(
6898 &self,
6899 mut payload: NodeIsAlternateForRequest,
6900 ) -> fidl::client::QueryResponseFut<
6901 NodeIsAlternateForResult,
6902 fidl::encoding::DefaultFuchsiaResourceDialect,
6903 > {
6904 BufferCollectionTokenProxyInterface::r#is_alternate_for(self, payload)
6905 }
6906
6907 /// Get the buffer collection ID. This ID is also available from
6908 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
6909 /// within the collection).
6910 ///
6911 /// This call is mainly useful in situations where we can't convey a
6912 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
6913 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
6914 /// handle, which can be joined back up with a `BufferCollection` client end
6915 /// that was created via a different path. Prefer to convey a
6916 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
6917 ///
6918 /// Trusting a `buffer_collection_id` value from a source other than sysmem
6919 /// is analogous to trusting a koid value from a source other than zircon.
6920 /// Both should be avoided unless really necessary, and both require
6921 /// caution. In some situations it may be reasonable to refer to a
6922 /// pre-established `BufferCollection` by `buffer_collection_id` via a
6923 /// protocol for efficiency reasons, but an incoming value purporting to be
6924 /// a `buffer_collection_id` is not sufficient alone to justify granting the
6925 /// sender of the `buffer_collection_id` any capability. The sender must
6926 /// first prove to a receiver that the sender has/had a VMO or has/had a
6927 /// `BufferCollectionToken` to the same collection by sending a handle that
6928 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
6929 /// `buffer_collection_id` value. The receiver should take care to avoid
6930 /// assuming that a sender had a `BufferCollectionToken` in cases where the
6931 /// sender has only proven that the sender had a VMO.
6932 ///
6933 /// - response `buffer_collection_id` This ID is unique per buffer
6934 /// collection per boot. Each buffer is uniquely identified by the
6935 /// `buffer_collection_id` and `buffer_index` together.
6936 pub fn r#get_buffer_collection_id(
6937 &self,
6938 ) -> fidl::client::QueryResponseFut<
6939 NodeGetBufferCollectionIdResponse,
6940 fidl::encoding::DefaultFuchsiaResourceDialect,
6941 > {
6942 BufferCollectionTokenProxyInterface::r#get_buffer_collection_id(self)
6943 }
6944
6945 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
6946 /// created after this message to weak, which means that a client's `Node`
6947 /// client end (or a child created after this message) is not alone
6948 /// sufficient to keep allocated VMOs alive.
6949 ///
6950 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
6951 /// `close_weak_asap`.
6952 ///
6953 /// This message is only permitted before the `Node` becomes ready for
6954 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
6955 /// * `BufferCollectionToken`: any time
6956 /// * `BufferCollection`: before `SetConstraints`
6957 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
6958 ///
6959 /// Currently, no conversion from strong `Node` to weak `Node` after ready
6960 /// for allocation is provided, but a client can simulate that by creating
6961 /// an additional `Node` before allocation and setting that additional
6962 /// `Node` to weak, and then potentially at some point later sending
6963 /// `Release` and closing the client end of the client's strong `Node`, but
6964 /// keeping the client's weak `Node`.
6965 ///
6966 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
6967 /// collection failure (all `Node` client end(s) will see
6968 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
6969 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
6970 /// this situation until all `Node`(s) are ready for allocation. For initial
6971 /// allocation to succeed, at least one strong `Node` is required to exist
6972 /// at allocation time, but after that client receives VMO handles, that
6973 /// client can `BufferCollection.Release` and close the client end without
6974 /// causing this type of failure.
6975 ///
6976 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
6977 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
6978 /// separately as appropriate.
6979 pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
6980 BufferCollectionTokenProxyInterface::r#set_weak(self)
6981 }
6982
6983 /// This indicates to sysmem that the client is prepared to pay attention to
6984 /// `close_weak_asap`.
6985 ///
6986 /// If sent, this message must be before
6987 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
6988 ///
6989 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
6990 /// send this message before `WaitForAllBuffersAllocated`, or a parent
6991 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
6992 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
6993 /// trigger buffer collection failure.
6994 ///
6995 /// This message is necessary because weak sysmem VMOs have not always been
6996 /// a thing, so older clients are not aware of the need to pay attention to
6997 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
6998 /// sysmem weak VMO handles asap. By having this message and requiring
6999 /// participants to indicate their acceptance of this aspect of the overall
7000 /// protocol, we avoid situations where an older client is delivered a weak
7001 /// VMO without any way for sysmem to get that VMO to close quickly later
7002 /// (and on a per-buffer basis).
7003 ///
7004 /// A participant that doesn't handle `close_weak_asap` and also doesn't
7005 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
7006 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
7007 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
7008 /// same participant has a child/delegate which does retrieve VMOs, that
7009 /// child/delegate will need to send `SetWeakOk` before
7010 /// `WaitForAllBuffersAllocated`.
7011 ///
7012 /// + request `for_child_nodes_also` If present and true, this means direct
7013 /// child nodes of this node created after this message plus all
7014 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
7015 /// those nodes. Any child node of this node that was created before this
7016 /// message is not included. This setting is "sticky" in the sense that a
7017 /// subsequent `SetWeakOk` without this bool set to true does not reset
7018 /// the server-side bool. If this creates a problem for a participant, a
7019 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
7020 /// tokens instead, as appropriate. A participant should only set
7021 /// `for_child_nodes_also` true if the participant can really promise to
7022 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
7023 /// weak VMO handles held by participants holding the corresponding child
7024 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
7025 /// which are using sysmem(1) can be weak, despite the clients of those
7026 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
7027 /// direct way to find out about `close_weak_asap`. This only applies to
7028 /// descendents of this `Node` which are using sysmem(1), not to this
7029 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
7030 /// token, which will fail allocation unless an ancestor of this `Node`
7031 /// specified `for_child_nodes_also` true.
7032 pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
7033 BufferCollectionTokenProxyInterface::r#set_weak_ok(self, payload)
7034 }
7035
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
7038 /// reservation by a different `Node` via
7039 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
7040 ///
7041 /// The `Node` buffer counts may not be released until the entire tree of
7042 /// `Node`(s) is closed or failed, because
7043 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
7044 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
7045 /// `Node` buffer counts remain reserved until the orphaned node is later
7046 /// cleaned up.
7047 ///
7048 /// If the `Node` exceeds a fairly large number of attached eventpair server
7049 /// ends, a log message will indicate this and the `Node` (and the
7050 /// appropriate) sub-tree will fail.
7051 ///
7052 /// The `server_end` will remain open when
7053 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
7054 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
7055 /// [`fuchsia.sysmem2/BufferCollection`].
7056 ///
7057 /// This message can also be used with a
7058 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
7059 pub fn r#attach_node_tracking(
7060 &self,
7061 mut payload: NodeAttachNodeTrackingRequest,
7062 ) -> Result<(), fidl::Error> {
7063 BufferCollectionTokenProxyInterface::r#attach_node_tracking(self, payload)
7064 }
7065
7066 /// Create additional [`fuchsia.sysmem2/BufferCollectionToken`](s) from this
7067 /// one, referring to the same buffer collection.
7068 ///
7069 /// The created tokens are children of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
7071 ///
7072 /// This method can be used to add more participants, by transferring the
7073 /// newly created tokens to additional participants.
7074 ///
7075 /// A new token will be returned for each entry in the
7076 /// `rights_attenuation_masks` array.
7077 ///
7078 /// If the called token may not actually be a valid token due to a
7079 /// potentially hostile/untrusted provider of the token, consider using
7080 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
7081 /// instead of potentially getting stuck indefinitely if
7082 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] never responds
7083 /// due to the calling token not being a real token.
7084 ///
7085 /// In contrast to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], no
7086 /// separate [`fuchsia.sysmem2/Node.Sync`] is needed after calling this
7087 /// method, because the sync step is included in this call, at the cost of a
7088 /// round trip during this call.
7089 ///
7090 /// All tokens must be turned in to sysmem via
7091 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
7092 /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
7093 /// successfully allocate buffers (or to logically allocate buffers in the
7094 /// case of subtrees involving
7095 /// [`fuchsia.sysmem2/BufferCollectionToken.AttachToken`]).
7096 ///
7097 /// All table fields are currently required.
7098 ///
7099 /// + request `rights_attenuation_mask` In each entry of
7100 /// `rights_attenuation_masks`, rights bits that are zero will be absent
7101 /// in the buffer VMO rights obtainable via the corresponding returned
7102 /// token. This allows an initiator or intermediary participant to
7103 /// attenuate the rights available to a participant. This does not allow a
7104 /// participant to gain rights that the participant doesn't already have.
7105 /// The value `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no
7106 /// attenuation should be applied.
7107 /// - response `tokens` The client ends of each newly created token.
7108 pub fn r#duplicate_sync(
7109 &self,
7110 mut payload: &BufferCollectionTokenDuplicateSyncRequest,
7111 ) -> fidl::client::QueryResponseFut<
7112 BufferCollectionTokenDuplicateSyncResponse,
7113 fidl::encoding::DefaultFuchsiaResourceDialect,
7114 > {
7115 BufferCollectionTokenProxyInterface::r#duplicate_sync(self, payload)
7116 }
7117
7118 /// Create an additional [`fuchsia.sysmem2/BufferCollectionToken`] from this
7119 /// one, referring to the same buffer collection.
7120 ///
7121 /// The created token is a child of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
7123 ///
7124 /// This method can be used to add a participant, by transferring the newly
7125 /// created token to another participant.
7126 ///
7127 /// This one-way message can be used instead of the two-way
7128 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] FIDL call in
    /// performance sensitive cases where it would be undesirable to wait for
7130 /// sysmem to respond to
7131 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or when the
7132 /// client code isn't structured to make it easy to duplicate all the needed
7133 /// tokens at once.
7134 ///
7135 /// After sending one or more `Duplicate` messages, and before sending the
7136 /// newly created child tokens to other participants (or to other
7137 /// [`fuchsia.sysmem2/Allocator`] channels), the client must send a
7138 /// [`fuchsia.sysmem2/Node.Sync`] and wait for the `Sync` response. The
7139 /// `Sync` call can be made on the token, or on the `BufferCollection`
7140 /// obtained by passing this token to `BindSharedCollection`. Either will
7141 /// ensure that the server knows about the tokens created via `Duplicate`
7142 /// before the other participant sends the token to the server via separate
7143 /// `Allocator` channel.
7144 ///
7145 /// All tokens must be turned in via
7146 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
7147 /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
7148 /// successfully allocate buffers.
7149 ///
7150 /// All table fields are currently required.
7151 ///
7152 /// + request `rights_attenuation_mask` The rights bits that are zero in
7153 /// this mask will be absent in the buffer VMO rights obtainable via the
7154 /// client end of `token_request`. This allows an initiator or
7155 /// intermediary participant to attenuate the rights available to a
7156 /// delegate participant. This does not allow a participant to gain rights
7157 /// that the participant doesn't already have. The value
7158 /// `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no attenuation
7159 /// should be applied.
7160 /// + These values for rights_attenuation_mask result in no attenuation:
7161 /// + `ZX_RIGHT_SAME_RIGHTS` (preferred)
7162 /// + 0xFFFFFFFF (this is reasonable when an attenuation mask is
7163 /// computed)
7164 /// + 0 (deprecated - do not use 0 - an ERROR will go to the log)
7165 /// + request `token_request` is the server end of a `BufferCollectionToken`
7166 /// channel. The client end of this channel acts as another participant in
7167 /// the shared buffer collection.
7168 pub fn r#duplicate(
7169 &self,
7170 mut payload: BufferCollectionTokenDuplicateRequest,
7171 ) -> Result<(), fidl::Error> {
7172 BufferCollectionTokenProxyInterface::r#duplicate(self, payload)
7173 }
7174
7175 /// Set this [`fuchsia.sysmem2/BufferCollectionToken`] to dispensable.
7176 ///
7177 /// When the `BufferCollectionToken` is converted to a
7178 /// [`fuchsia.sysmem2/BufferCollection`], the dispensable status applies to
7179 /// the `BufferCollection` also.
7180 ///
7181 /// Normally, if a client closes a [`fuchsia.sysmem2/BufferCollection`]
7182 /// client end without having sent
7183 /// [`fuchsia.sysmem2/BufferCollection.Release`] first, the
    /// `BufferCollection` [`fuchsia.sysmem2/Node`] will fail, which also
7185 /// propagates failure to the parent [`fuchsia.sysmem2/Node`] and so on up
7186 /// to the root `Node`, which fails the whole buffer collection. In
7187 /// contrast, a dispensable `Node` can fail after buffers are allocated
7188 /// without causing failure of its parent in the [`fuchsia.sysmem2/Node`]
    /// hierarchy.
7190 ///
7191 /// The dispensable `Node` participates in constraints aggregation along
7192 /// with its parent before buffer allocation. If the dispensable `Node`
7193 /// fails before buffers are allocated, the failure propagates to the
7194 /// dispensable `Node`'s parent.
7195 ///
7196 /// After buffers are allocated, failure of the dispensable `Node` (or any
7197 /// child of the dispensable `Node`) does not propagate to the dispensable
7198 /// `Node`'s parent. Failure does propagate from a normal child of a
7199 /// dispensable `Node` to the dispensable `Node`. Failure of a child is
7200 /// blocked from reaching its parent if the child is attached using
7201 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or if the child is
7202 /// dispensable and the failure occurred after allocation.
7203 ///
7204 /// A dispensable `Node` can be used in cases where a participant needs to
7205 /// provide constraints, but after buffers are allocated, the participant
7206 /// can fail without causing buffer collection failure from the parent
7207 /// `Node`'s point of view.
7208 ///
7209 /// In contrast, `BufferCollection.AttachToken` can be used to create a
7210 /// `BufferCollectionToken` which does not participate in constraints
7211 /// aggregation with its parent `Node`, and whose failure at any time does
7212 /// not propagate to its parent `Node`, and whose potential delay providing
7213 /// constraints does not prevent the parent `Node` from completing its
7214 /// buffer allocation.
7215 ///
7216 /// An initiator (creator of the root `Node` using
7217 /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`]) may in some
7218 /// scenarios choose to initially use a dispensable `Node` for a first
7219 /// instance of a participant, and then later if the first instance of that
    /// participant fails, a new second instance of that participant may be given
7221 /// a `BufferCollectionToken` created with `AttachToken`.
7222 ///
7223 /// Normally a client will `SetDispensable` on a `BufferCollectionToken`
7224 /// shortly before sending the dispensable `BufferCollectionToken` to a
7225 /// delegate participant. Because `SetDispensable` prevents propagation of
7226 /// child `Node` failure to parent `Node`(s), if the client was relying on
7227 /// noticing child failure via failure of the parent `Node` retained by the
7228 /// client, the client may instead need to notice failure via other means.
7229 /// If other means aren't available/convenient, the client can instead
7230 /// retain the dispensable `Node` and create a child `Node` under that to
7231 /// send to the delegate participant, retaining this `Node` in order to
7232 /// notice failure of the subtree rooted at this `Node` via this `Node`'s
7233 /// ZX_CHANNEL_PEER_CLOSED signal, and take whatever action is appropriate
7234 /// (e.g. starting a new instance of the delegate participant and handing it
7235 /// a `BufferCollectionToken` created using
7236 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or propagate failure
7237 /// and clean up in a client-specific way).
7238 ///
7239 /// While it is possible (and potentially useful) to `SetDispensable` on a
7240 /// direct child of a `BufferCollectionTokenGroup` `Node`, it isn't possible
7241 /// to later replace a failed dispensable `Node` that was a direct child of
7242 /// a `BufferCollectionTokenGroup` with a new token using `AttachToken`
7243 /// (since there's no `AttachToken` on a group). Instead, to enable
7244 /// `AttachToken` replacement in this case, create an additional
7245 /// non-dispensable token that's a direct child of the group and make the
7246 /// existing dispensable token a child of the additional token. This way,
7247 /// the additional token that is a direct child of the group has
7248 /// `BufferCollection.AttachToken` which can be used to replace the failed
7249 /// dispensable token.
7250 ///
7251 /// `SetDispensable` on an already-dispensable token is idempotent.
7252 pub fn r#set_dispensable(&self) -> Result<(), fidl::Error> {
7253 BufferCollectionTokenProxyInterface::r#set_dispensable(self)
7254 }
7255
7256 /// Create a logical OR among a set of tokens, called a
7257 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
7258 ///
7259 /// Most sysmem clients and many participants don't need to care about this
7260 /// message or about `BufferCollectionTokenGroup`(s). However, in some cases
7261 /// a participant wants to attempt to include one set of delegate
7262 /// participants, but if constraints don't combine successfully that way,
7263 /// fall back to a different (possibly overlapping) set of delegate
7264 /// participants, and/or fall back to a less demanding strategy (in terms of
    /// how strict the [`fuchsia.sysmem2/BufferCollectionConstraints`] are,
7266 /// across all involved delegate participants). In such cases, a
7267 /// `BufferCollectionTokenGroup` is useful.
7268 ///
7269 /// A `BufferCollectionTokenGroup` is used to create a 1 of N OR among N
7270 /// child [`fuchsia.sysmem2/BufferCollectionToken`](s). The child tokens
7271 /// which are not selected during aggregation will fail (close), which a
7272 /// potential participant should notice when their `BufferCollection`
7273 /// channel client endpoint sees PEER_CLOSED, allowing the participant to
7274 /// clean up the speculative usage that didn't end up happening (this is
    /// similar to a normal `BufferCollection` server end closing on failure to
7276 /// allocate a logical buffer collection or later async failure of a buffer
7277 /// collection).
7278 ///
7279 /// See comments on protocol `BufferCollectionTokenGroup`.
7280 ///
7281 /// Any `rights_attenuation_mask` or `AttachToken`/`SetDispensable` to be
7282 /// applied to the whole group can be achieved with a
7283 /// `BufferCollectionToken` for this purpose as a direct parent of the
7284 /// `BufferCollectionTokenGroup`.
7285 ///
7286 /// All table fields are currently required.
7287 ///
7288 /// + request `group_request` The server end of a
7289 /// `BufferCollectionTokenGroup` channel to be served by sysmem.
7290 pub fn r#create_buffer_collection_token_group(
7291 &self,
7292 mut payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
7293 ) -> Result<(), fidl::Error> {
7294 BufferCollectionTokenProxyInterface::r#create_buffer_collection_token_group(self, payload)
7295 }
7296}
7297
// Client-side implementation of the `BufferCollectionToken` protocol for the
// async proxy. Every method encodes its request with the method's 64-bit
// ordinal and FLEXIBLE dynamic flags. Two-way methods supply a local
// `_decode` helper that unwraps the flexible response envelope for the same
// ordinal; the ordinals here must match the server-side dispatch in this
// file's request stream.
impl BufferCollectionTokenProxyInterface for BufferCollectionTokenProxy {
    type SyncResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    fn r#sync(&self) -> Self::SyncResponseFut {
        // Decodes the empty `Sync` response when the reply arrives.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x11ac2555cf575b54,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way (fire-and-forget) methods: no response is decoded.
    fn r#release(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
        // Decodes the `GetNodeRef` response table.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b3d0e51614df053,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("get_node_ref")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut {
        // Decodes the `IsAlternateFor` result (success table or domain error).
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x3a58e00157e0825,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("is_alternate_for")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
        // Decodes the `GetBufferCollectionId` response table.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x77d19a494b78ba8c,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("get_buffer_collection_id")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            NodeGetBufferCollectionIdResponse,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    fn r#set_weak(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type DuplicateSyncResponseFut = fidl::client::QueryResponseFut<
        BufferCollectionTokenDuplicateSyncResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#duplicate_sync(
        &self,
        mut payload: &BufferCollectionTokenDuplicateSyncRequest,
    ) -> Self::DuplicateSyncResponseFut {
        // Decodes the `DuplicateSync` response table.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<BufferCollectionTokenDuplicateSyncResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<BufferCollectionTokenDuplicateSyncResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x1c1af9919d1ca45c,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("duplicate_sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            BufferCollectionTokenDuplicateSyncRequest,
            BufferCollectionTokenDuplicateSyncResponse,
        >(
            payload,
            0x1c1af9919d1ca45c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    fn r#duplicate(
        &self,
        mut payload: BufferCollectionTokenDuplicateRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionTokenDuplicateRequest>(
            &mut payload,
            0x73e78f92ee7fb887,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_dispensable(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x228acf979254df8b,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#create_buffer_collection_token_group(
        &self,
        mut payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(
            &mut payload,
            0x30f8d48e77bd36f2,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
7532
/// Stream of events arriving on a `BufferCollectionToken` channel; items are
/// decoded into [`BufferCollectionTokenEvent`] values by the `Stream` impl.
pub struct BufferCollectionTokenEventStream {
    // Receives raw event message buffers from the underlying channel.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
7536
// Marker impl: the event stream may be moved freely even after being pinned.
impl std::marker::Unpin for BufferCollectionTokenEventStream {}
7538
7539impl futures::stream::FusedStream for BufferCollectionTokenEventStream {
7540 fn is_terminated(&self) -> bool {
7541 self.event_receiver.is_terminated()
7542 }
7543}
7544
impl futures::Stream for BufferCollectionTokenEventStream {
    type Item = Result<BufferCollectionTokenEvent, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        // Poll the inner receiver for the next raw message buffer; `ready!`
        // propagates Pending, and `?` surfaces receiver errors as an item.
        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
            &mut self.event_receiver,
            cx
        )?) {
            // Got a buffer: decode it into a typed event (which may itself fail).
            Some(buf) => std::task::Poll::Ready(Some(BufferCollectionTokenEvent::decode(buf))),
            // Receiver exhausted: the stream is finished.
            None => std::task::Poll::Ready(None),
        }
    }
}
7561
/// Events delivered by the `BufferCollectionToken` protocol. Only the
/// flexible unknown-event fallback is generated here; see
/// `BufferCollectionTokenEvent::decode` for how it is produced.
#[derive(Debug)]
pub enum BufferCollectionTokenEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
7570
impl BufferCollectionTokenEvent {
    /// Decodes a message buffer as a [`BufferCollectionTokenEvent`].
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<BufferCollectionTokenEvent, fidl::Error> {
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        // Events are unsolicited, so their transaction id is always zero.
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            // A FLEXIBLE event with an unrecognized ordinal is tolerated and
            // surfaced as `_UnknownEvent` rather than an error.
            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                Ok(BufferCollectionTokenEvent::_UnknownEvent { ordinal: tx_header.ordinal })
            }
            // A strict (non-FLEXIBLE) unknown ordinal is a protocol error.
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name:
                    <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            }),
        }
    }
}
7591
/// A Stream of incoming requests for fuchsia.sysmem2/BufferCollectionToken.
pub struct BufferCollectionTokenRequestStream {
    // Shared server state; also cloned into control handles and responders.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the channel is shut down or the peer closes; `poll_next`
    // panics if the stream is polled again after this becomes true.
    is_terminated: bool,
}
7597
// Marker impl: the request stream may be moved freely even after being pinned.
impl std::marker::Unpin for BufferCollectionTokenRequestStream {}
7599
7600impl futures::stream::FusedStream for BufferCollectionTokenRequestStream {
7601 fn is_terminated(&self) -> bool {
7602 self.is_terminated
7603 }
7604}
7605
impl fidl::endpoints::RequestStream for BufferCollectionTokenRequestStream {
    type Protocol = BufferCollectionTokenMarker;
    type ControlHandle = BufferCollectionTokenControlHandle;

    // Wraps a freshly-accepted server channel in serving state.
    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    // Creates a control handle sharing this stream's serving state.
    fn control_handle(&self) -> Self::ControlHandle {
        BufferCollectionTokenControlHandle { inner: self.inner.clone() }
    }

    // Decomposes the stream into its shared state and termination flag,
    // mirroring `from_inner` below.
    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    // Rebuilds a stream from parts previously produced by `into_inner`.
    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
7632
// Server-side message pump: reads one message from the channel, decodes its
// header, and dispatches on the method ordinal to produce a typed
// `BufferCollectionTokenRequest`. The ordinals here must match the client
// bindings above. One-way methods yield only a control handle; two-way
// methods yield a responder wrapping the transaction id.
impl futures::Stream for BufferCollectionTokenRequestStream {
    type Item = Result<BufferCollectionTokenRequest, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // Shutdown requested via the control handle: end the stream.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        // Polling a finished stream is a caller bug.
        if this.is_terminated {
            panic!("polled BufferCollectionTokenRequestStream after completion");
        }
        // Borrow thread-local scratch buffers for the read + decode.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closed: terminate cleanly rather than erroring.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))));
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                std::task::Poll::Ready(Some(match header.ordinal {
                    // Sync (two-way)
                    0x11ac2555cf575b54 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::Sync {
                            responder: BufferCollectionTokenSyncResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Release (one-way)
                    0x6a5cae7d6d6e04c6 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::Release {
                            control_handle,
                        })
                    }
                    // SetName (one-way)
                    0xb41f1624f48c1e9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetNameRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::SetName {payload: req,
                            control_handle,
                        })
                    }
                    // SetDebugClientInfo (one-way)
                    0x5cde8914608d99b1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetDebugClientInfoRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::SetDebugClientInfo {payload: req,
                            control_handle,
                        })
                    }
                    // SetDebugTimeoutLogDeadline (one-way)
                    0x716b0af13d5c0806 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetDebugTimeoutLogDeadlineRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::SetDebugTimeoutLogDeadline {payload: req,
                            control_handle,
                        })
                    }
                    // SetVerboseLogging (one-way)
                    0x5209c77415b4dfad => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::SetVerboseLogging {
                            control_handle,
                        })
                    }
                    // GetNodeRef (two-way)
                    0x5b3d0e51614df053 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::GetNodeRef {
                            responder: BufferCollectionTokenGetNodeRefResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // IsAlternateFor (two-way)
                    0x3a58e00157e0825 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(NodeIsAlternateForRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::IsAlternateFor {payload: req,
                            responder: BufferCollectionTokenIsAlternateForResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // GetBufferCollectionId (two-way)
                    0x77d19a494b78ba8c => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::GetBufferCollectionId {
                            responder: BufferCollectionTokenGetBufferCollectionIdResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // SetWeak (one-way)
                    0x22dd3ea514eeffe1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::SetWeak {
                            control_handle,
                        })
                    }
                    // SetWeakOk (one-way)
                    0x38a44fc4d7724be9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetWeakOkRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::SetWeakOk {payload: req,
                            control_handle,
                        })
                    }
                    // AttachNodeTracking (one-way)
                    0x3f22f2a293d3cdac => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeAttachNodeTrackingRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::AttachNodeTracking {payload: req,
                            control_handle,
                        })
                    }
                    // DuplicateSync (two-way)
                    0x1c1af9919d1ca45c => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(BufferCollectionTokenDuplicateSyncRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenDuplicateSyncRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::DuplicateSync {payload: req,
                            responder: BufferCollectionTokenDuplicateSyncResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Duplicate (one-way)
                    0x73e78f92ee7fb887 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(BufferCollectionTokenDuplicateRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenDuplicateRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::Duplicate {payload: req,
                            control_handle,
                        })
                    }
                    // SetDispensable (one-way)
                    0x228acf979254df8b => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::SetDispensable {
                            control_handle,
                        })
                    }
                    // CreateBufferCollectionTokenGroup (one-way)
                    0x30f8d48e77bd36f2 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(BufferCollectionTokenCreateBufferCollectionTokenGroupRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::CreateBufferCollectionTokenGroup {payload: req,
                            control_handle,
                        })
                    }
                    // Unknown FLEXIBLE one-way (tx_id == 0): surface to the server
                    // as `_UnknownMethod`; no reply is expected.
                    _ if header.tx_id == 0 && header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                        Ok(BufferCollectionTokenRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: BufferCollectionTokenControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::OneWay,
                        })
                    }
                    // Unknown FLEXIBLE two-way: reply with a framework error so the
                    // client's call completes, then surface `_UnknownMethod`.
                    _ if header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                        this.inner.send_framework_err(
                            fidl::encoding::FrameworkErr::UnknownMethod,
                            header.tx_id,
                            header.ordinal,
                            header.dynamic_flags(),
                            (bytes, handles),
                        )?;
                        Ok(BufferCollectionTokenRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: BufferCollectionTokenControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::TwoWay,
                        })
                    }
                    // Unknown strict method: protocol error.
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name: <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
7889
7890/// A [`fuchsia.sysmem2/BufferCollectionToken`] is not a buffer collection, but
7891/// rather is a way to identify a specific potential shared buffer collection,
7892/// and a way to distribute that potential shared buffer collection to
7893/// additional participants prior to the buffer collection allocating any
7894/// buffers.
7895///
7896/// Epitaphs are not used in this protocol.
7897///
7898/// We use a channel for the `BufferCollectionToken` instead of a single
7899/// `eventpair` (pair) because this way we can detect error conditions like a
7900/// participant failing mid-create.
7901#[derive(Debug)]
7902pub enum BufferCollectionTokenRequest {
7903 /// Ensure that previous messages have been received server side. This is
7904 /// particularly useful after previous messages that created new tokens,
7905 /// because a token must be known to the sysmem server before sending the
7906 /// token to another participant.
7907 ///
7908 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
7909 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
7910 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
7911 /// to mitigate the possibility of a hostile/fake
7912 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
7913 /// Another way is to pass the token to
7914 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
7915 /// the token as part of exchanging it for a
7916 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
7917 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
7918 /// of stalling.
7919 ///
7920 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
7921 /// and then starting and completing a `Sync`, it's then safe to send the
7922 /// `BufferCollectionToken` client ends to other participants knowing the
7923 /// server will recognize the tokens when they're sent by the other
7924 /// participants to sysmem in a
7925 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
7926 /// efficient way to create tokens while avoiding unnecessary round trips.
7927 ///
7928 /// Other options include waiting for each
7929 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
7930 /// individually (using separate call to `Sync` after each), or calling
7931 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
7932 /// converted to a `BufferCollection` via
7933 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
7934 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
7935 /// the sync step and can create multiple tokens at once.
7936 Sync { responder: BufferCollectionTokenSyncResponder },
7937 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
7938 ///
7939 /// Normally a participant will convert a `BufferCollectionToken` into a
7940 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
7941 /// `Release` via the token (and then close the channel immediately or
7942 /// shortly later in response to server closing the server end), which
7943 /// avoids causing buffer collection failure. Without a prior `Release`,
7944 /// closing the `BufferCollectionToken` client end will cause buffer
7945 /// collection failure.
7946 ///
7947 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
7948 ///
7949 /// By default the server handles unexpected closure of a
7950 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
7951 /// first) by failing the buffer collection. Partly this is to expedite
7952 /// closing VMO handles to reclaim memory when any participant fails. If a
7953 /// participant would like to cleanly close a `BufferCollection` without
7954 /// causing buffer collection failure, the participant can send `Release`
7955 /// before closing the `BufferCollection` client end. The `Release` can
7956 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
7957 /// buffer collection won't require constraints from this node in order to
7958 /// allocate. If after `SetConstraints`, the constraints are retained and
7959 /// aggregated, despite the lack of `BufferCollection` connection at the
7960 /// time of constraints aggregation.
7961 ///
7962 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
7963 ///
7964 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
7965 /// end (without `Release` first) will trigger failure of the buffer
7966 /// collection. To close a `BufferCollectionTokenGroup` channel without
7967 /// failing the buffer collection, ensure that AllChildrenPresent() has been
7968 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
7969 /// client end.
7970 ///
7971 /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
7973 /// buffer collection will fail (triggered by reception of `Release` without
7974 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
7975 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
7976 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
7977 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
7978 /// close requires `AllChildrenPresent` (if not already sent), then
7979 /// `Release`, then close client end.
7980 ///
7981 /// If `Release` occurs after `AllChildrenPresent`, the children and all
7982 /// their constraints remain intact (just as they would if the
7983 /// `BufferCollectionTokenGroup` channel had remained open), and the client
7984 /// end close doesn't trigger buffer collection failure.
7985 ///
7986 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
7987 ///
7988 /// For brevity, the per-channel-protocol paragraphs above ignore the
7989 /// separate failure domain created by
7990 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
7991 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
7992 /// unexpectedly closes (without `Release` first) and that client end is
7993 /// under a failure domain, instead of failing the whole buffer collection,
7994 /// the failure domain is failed, but the buffer collection itself is
7995 /// isolated from failure of the failure domain. Such failure domains can be
7996 /// nested, in which case only the inner-most failure domain in which the
7997 /// `Node` resides fails.
7998 Release { control_handle: BufferCollectionTokenControlHandle },
7999 /// Set a name for VMOs in this buffer collection.
8000 ///
8001 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
8002 /// will be truncated to fit. The name of the vmo will be suffixed with the
8003 /// buffer index within the collection (if the suffix fits within
8004 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
8005 /// listed in the inspect data.
8006 ///
8007 /// The name only affects VMOs allocated after the name is set; this call
8008 /// does not rename existing VMOs. If multiple clients set different names
8009 /// then the larger priority value will win. Setting a new name with the
8010 /// same priority as a prior name doesn't change the name.
8011 ///
8012 /// All table fields are currently required.
8013 ///
8014 /// + request `priority` The name is only set if this is the first `SetName`
8015 /// or if `priority` is greater than any previous `priority` value in
8016 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
8017 /// + request `name` The name for VMOs created under this buffer collection.
8018 SetName { payload: NodeSetNameRequest, control_handle: BufferCollectionTokenControlHandle },
8019 /// Set information about the current client that can be used by sysmem to
8020 /// help diagnose leaking memory and allocation stalls waiting for a
8021 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
8022 ///
8023 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
8025 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
8026 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
8027 ///
8028 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
8029 /// `Allocator` is the most efficient way to ensure that all
8030 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
8031 /// set, and is also more efficient than separately sending the same debug
8032 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
8033 /// created [`fuchsia.sysmem2/Node`].
8034 ///
8035 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
8036 /// indicate which client is closing their channel first, leading to subtree
8037 /// failure (which can be normal if the purpose of the subtree is over, but
8038 /// if happening earlier than expected, the client-channel-specific name can
8039 /// help diagnose where the failure is first coming from, from sysmem's
8040 /// point of view).
8041 ///
8042 /// All table fields are currently required.
8043 ///
8044 /// + request `name` This can be an arbitrary string, but the current
8045 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
8046 /// + request `id` This can be an arbitrary id, but the current process ID
8047 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
8048 SetDebugClientInfo {
8049 payload: NodeSetDebugClientInfoRequest,
8050 control_handle: BufferCollectionTokenControlHandle,
8051 },
8052 /// Sysmem logs a warning if sysmem hasn't seen
8053 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
8054 /// within 5 seconds after creation of a new collection.
8055 ///
8056 /// Clients can call this method to change when the log is printed. If
8057 /// multiple client set the deadline, it's unspecified which deadline will
8058 /// take effect.
8059 ///
8060 /// In most cases the default works well.
8061 ///
8062 /// All table fields are currently required.
8063 ///
8064 /// + request `deadline` The time at which sysmem will start trying to log
8065 /// the warning, unless all constraints are with sysmem by then.
8066 SetDebugTimeoutLogDeadline {
8067 payload: NodeSetDebugTimeoutLogDeadlineRequest,
8068 control_handle: BufferCollectionTokenControlHandle,
8069 },
8070 /// This enables verbose logging for the buffer collection.
8071 ///
8072 /// Verbose logging includes constraints set via
8073 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
8074 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
8075 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
8076 /// the tree of `Node`(s).
8077 ///
8078 /// Normally sysmem prints only a single line complaint when aggregation
8079 /// fails, with just the specific detailed reason that aggregation failed,
8080 /// with little surrounding context. While this is often enough to diagnose
8081 /// a problem if only a small change was made and everything was working
8082 /// before the small change, it's often not particularly helpful for getting
8083 /// a new buffer collection to work for the first time. Especially with
8084 /// more complex trees of nodes, involving things like
8085 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
8086 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
8087 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
8088 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
8089 /// looks like and why it's failing a logical allocation, or why a tree or
8090 /// subtree is failing sooner than expected.
8091 ///
8092 /// The intent of the extra logging is to be acceptable from a performance
8093 /// point of view, under the assumption that verbose logging is only enabled
8094 /// on a low number of buffer collections. If we're not tracking down a bug,
8095 /// we shouldn't send this message.
8096 SetVerboseLogging { control_handle: BufferCollectionTokenControlHandle },
8097 /// This gets a handle that can be used as a parameter to
8098 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
8099 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
8100 /// client obtained this handle from this `Node`.
8101 ///
8102 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
8103 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
8104 /// despite the two calls typically being on different channels.
8105 ///
8106 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
8107 ///
8108 /// All table fields are currently required.
8109 ///
8110 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
8111 /// different `Node` channel, to prove that the client obtained the handle
8112 /// from this `Node`.
8113 GetNodeRef { responder: BufferCollectionTokenGetNodeRefResponder },
8114 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
8115 /// rooted at a different child token of a common parent
8116 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
8117 /// passed-in `node_ref`.
8118 ///
8119 /// This call is for assisting with admission control de-duplication, and
8120 /// with debugging.
8121 ///
8122 /// The `node_ref` must be obtained using
8123 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
8124 ///
8125 /// The `node_ref` can be a duplicated handle; it's not necessary to call
8126 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
8127 ///
8128 /// If a calling token may not actually be a valid token at all due to a
8129 /// potentially hostile/untrusted provider of the token, call
8130 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
8131 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
8132 /// never responds due to a calling token not being a real token (not really
8133 /// talking to sysmem). Another option is to call
8134 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
8135 /// which also validates the token along with converting it to a
8136 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
8137 ///
8138 /// All table fields are currently required.
8139 ///
8140 /// - response `is_alternate`
8141 /// - true: The first parent node in common between the calling node and
8142 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
8143 /// that the calling `Node` and the `node_ref` `Node` will not have both
8144 /// their constraints apply - rather sysmem will choose one or the other
8145 /// of the constraints - never both. This is because only one child of
8146 /// a `BufferCollectionTokenGroup` is selected during logical
8147 /// allocation, with only that one child's subtree contributing to
8148 /// constraints aggregation.
8149 /// - false: The first parent node in common between the calling `Node`
8150 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
8151 /// Currently, this means the first parent node in common is a
8152 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
8153 /// `Release`ed). This means that the calling `Node` and the `node_ref`
8154 /// `Node` may have both their constraints apply during constraints
8155 /// aggregation of the logical allocation, if both `Node`(s) are
8156 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
8157 /// this case, there is no `BufferCollectionTokenGroup` that will
8158 /// directly prevent the two `Node`(s) from both being selected and
8159 /// their constraints both aggregated, but even when false, one or both
8160 /// `Node`(s) may still be eliminated from consideration if one or both
8161 /// `Node`(s) has a direct or indirect parent
8162 /// `BufferCollectionTokenGroup` which selects a child subtree other
8163 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
8164 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
8165 /// associated with the same buffer collection as the calling `Node`.
8166 /// Another reason for this error is if the `node_ref` is an
8167 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
8168 /// a real `node_ref` obtained from `GetNodeRef`.
8169 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    /// `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
8171 /// the needed rights expected on a real `node_ref`.
8172 /// * No other failing status codes are returned by this call. However,
8173 /// sysmem may add additional codes in future, so the client should have
8174 /// sensible default handling for any failing status code.
8175 IsAlternateFor {
8176 payload: NodeIsAlternateForRequest,
8177 responder: BufferCollectionTokenIsAlternateForResponder,
8178 },
8179 /// Get the buffer collection ID. This ID is also available from
8180 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
8181 /// within the collection).
8182 ///
8183 /// This call is mainly useful in situations where we can't convey a
8184 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
8185 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
8186 /// handle, which can be joined back up with a `BufferCollection` client end
8187 /// that was created via a different path. Prefer to convey a
8188 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
8189 ///
8190 /// Trusting a `buffer_collection_id` value from a source other than sysmem
8191 /// is analogous to trusting a koid value from a source other than zircon.
8192 /// Both should be avoided unless really necessary, and both require
8193 /// caution. In some situations it may be reasonable to refer to a
8194 /// pre-established `BufferCollection` by `buffer_collection_id` via a
8195 /// protocol for efficiency reasons, but an incoming value purporting to be
8196 /// a `buffer_collection_id` is not sufficient alone to justify granting the
8197 /// sender of the `buffer_collection_id` any capability. The sender must
8198 /// first prove to a receiver that the sender has/had a VMO or has/had a
8199 /// `BufferCollectionToken` to the same collection by sending a handle that
8200 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
8201 /// `buffer_collection_id` value. The receiver should take care to avoid
8202 /// assuming that a sender had a `BufferCollectionToken` in cases where the
8203 /// sender has only proven that the sender had a VMO.
8204 ///
8205 /// - response `buffer_collection_id` This ID is unique per buffer
8206 /// collection per boot. Each buffer is uniquely identified by the
8207 /// `buffer_collection_id` and `buffer_index` together.
8208 GetBufferCollectionId { responder: BufferCollectionTokenGetBufferCollectionIdResponder },
8209 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
8210 /// created after this message to weak, which means that a client's `Node`
8211 /// client end (or a child created after this message) is not alone
8212 /// sufficient to keep allocated VMOs alive.
8213 ///
8214 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
8215 /// `close_weak_asap`.
8216 ///
8217 /// This message is only permitted before the `Node` becomes ready for
8218 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
8219 /// * `BufferCollectionToken`: any time
8220 /// * `BufferCollection`: before `SetConstraints`
8221 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
8222 ///
8223 /// Currently, no conversion from strong `Node` to weak `Node` after ready
8224 /// for allocation is provided, but a client can simulate that by creating
8225 /// an additional `Node` before allocation and setting that additional
8226 /// `Node` to weak, and then potentially at some point later sending
8227 /// `Release` and closing the client end of the client's strong `Node`, but
8228 /// keeping the client's weak `Node`.
8229 ///
8230 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
8231 /// collection failure (all `Node` client end(s) will see
8232 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
8233 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
8234 /// this situation until all `Node`(s) are ready for allocation. For initial
8235 /// allocation to succeed, at least one strong `Node` is required to exist
8236 /// at allocation time, but after that client receives VMO handles, that
8237 /// client can `BufferCollection.Release` and close the client end without
8238 /// causing this type of failure.
8239 ///
8240 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
8241 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
8242 /// separately as appropriate.
8243 SetWeak { control_handle: BufferCollectionTokenControlHandle },
8244 /// This indicates to sysmem that the client is prepared to pay attention to
8245 /// `close_weak_asap`.
8246 ///
8247 /// If sent, this message must be before
8248 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
8249 ///
8250 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
8251 /// send this message before `WaitForAllBuffersAllocated`, or a parent
8252 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
8253 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
8254 /// trigger buffer collection failure.
8255 ///
8256 /// This message is necessary because weak sysmem VMOs have not always been
8257 /// a thing, so older clients are not aware of the need to pay attention to
8258 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
8259 /// sysmem weak VMO handles asap. By having this message and requiring
8260 /// participants to indicate their acceptance of this aspect of the overall
8261 /// protocol, we avoid situations where an older client is delivered a weak
8262 /// VMO without any way for sysmem to get that VMO to close quickly later
8263 /// (and on a per-buffer basis).
8264 ///
8265 /// A participant that doesn't handle `close_weak_asap` and also doesn't
8266 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
8267 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
8268 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
8269 /// same participant has a child/delegate which does retrieve VMOs, that
8270 /// child/delegate will need to send `SetWeakOk` before
8271 /// `WaitForAllBuffersAllocated`.
8272 ///
8273 /// + request `for_child_nodes_also` If present and true, this means direct
8274 /// child nodes of this node created after this message plus all
8275 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
8276 /// those nodes. Any child node of this node that was created before this
8277 /// message is not included. This setting is "sticky" in the sense that a
8278 /// subsequent `SetWeakOk` without this bool set to true does not reset
8279 /// the server-side bool. If this creates a problem for a participant, a
8280 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
8281 /// tokens instead, as appropriate. A participant should only set
8282 /// `for_child_nodes_also` true if the participant can really promise to
8283 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
8284 /// weak VMO handles held by participants holding the corresponding child
8285 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
8286 /// which are using sysmem(1) can be weak, despite the clients of those
8287 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
8288 /// direct way to find out about `close_weak_asap`. This only applies to
8289 /// descendents of this `Node` which are using sysmem(1), not to this
8290 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
8291 /// token, which will fail allocation unless an ancestor of this `Node`
8292 /// specified `for_child_nodes_also` true.
8293 SetWeakOk { payload: NodeSetWeakOkRequest, control_handle: BufferCollectionTokenControlHandle },
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
8296 /// reservation by a different `Node` via
8297 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
8298 ///
8299 /// The `Node` buffer counts may not be released until the entire tree of
8300 /// `Node`(s) is closed or failed, because
8301 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
8302 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
8303 /// `Node` buffer counts remain reserved until the orphaned node is later
8304 /// cleaned up.
8305 ///
8306 /// If the `Node` exceeds a fairly large number of attached eventpair server
8307 /// ends, a log message will indicate this and the `Node` (and the
8308 /// appropriate) sub-tree will fail.
8309 ///
8310 /// The `server_end` will remain open when
8311 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
8312 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
8313 /// [`fuchsia.sysmem2/BufferCollection`].
8314 ///
8315 /// This message can also be used with a
8316 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
8317 AttachNodeTracking {
8318 payload: NodeAttachNodeTrackingRequest,
8319 control_handle: BufferCollectionTokenControlHandle,
8320 },
8321 /// Create additional [`fuchsia.sysmem2/BufferCollectionToken`](s) from this
8322 /// one, referring to the same buffer collection.
8323 ///
8324 /// The created tokens are children of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
8326 ///
8327 /// This method can be used to add more participants, by transferring the
8328 /// newly created tokens to additional participants.
8329 ///
8330 /// A new token will be returned for each entry in the
8331 /// `rights_attenuation_masks` array.
8332 ///
8333 /// If the called token may not actually be a valid token due to a
8334 /// potentially hostile/untrusted provider of the token, consider using
8335 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
8336 /// instead of potentially getting stuck indefinitely if
8337 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] never responds
8338 /// due to the calling token not being a real token.
8339 ///
8340 /// In contrast to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], no
8341 /// separate [`fuchsia.sysmem2/Node.Sync`] is needed after calling this
8342 /// method, because the sync step is included in this call, at the cost of a
8343 /// round trip during this call.
8344 ///
8345 /// All tokens must be turned in to sysmem via
8346 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
8347 /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
8348 /// successfully allocate buffers (or to logically allocate buffers in the
8349 /// case of subtrees involving
8350 /// [`fuchsia.sysmem2/BufferCollectionToken.AttachToken`]).
8351 ///
8352 /// All table fields are currently required.
8353 ///
8354 /// + request `rights_attenuation_mask` In each entry of
8355 /// `rights_attenuation_masks`, rights bits that are zero will be absent
8356 /// in the buffer VMO rights obtainable via the corresponding returned
8357 /// token. This allows an initiator or intermediary participant to
8358 /// attenuate the rights available to a participant. This does not allow a
8359 /// participant to gain rights that the participant doesn't already have.
8360 /// The value `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no
8361 /// attenuation should be applied.
8362 /// - response `tokens` The client ends of each newly created token.
8363 DuplicateSync {
8364 payload: BufferCollectionTokenDuplicateSyncRequest,
8365 responder: BufferCollectionTokenDuplicateSyncResponder,
8366 },
8367 /// Create an additional [`fuchsia.sysmem2/BufferCollectionToken`] from this
8368 /// one, referring to the same buffer collection.
8369 ///
8370 /// The created token is a child of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
8372 ///
8373 /// This method can be used to add a participant, by transferring the newly
8374 /// created token to another participant.
8375 ///
8376 /// This one-way message can be used instead of the two-way
8377 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] FIDL call in
    /// performance sensitive cases where it would be undesirable to wait for
8379 /// sysmem to respond to
8380 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or when the
8381 /// client code isn't structured to make it easy to duplicate all the needed
8382 /// tokens at once.
8383 ///
8384 /// After sending one or more `Duplicate` messages, and before sending the
8385 /// newly created child tokens to other participants (or to other
8386 /// [`fuchsia.sysmem2/Allocator`] channels), the client must send a
8387 /// [`fuchsia.sysmem2/Node.Sync`] and wait for the `Sync` response. The
8388 /// `Sync` call can be made on the token, or on the `BufferCollection`
8389 /// obtained by passing this token to `BindSharedCollection`. Either will
8390 /// ensure that the server knows about the tokens created via `Duplicate`
8391 /// before the other participant sends the token to the server via separate
8392 /// `Allocator` channel.
8393 ///
8394 /// All tokens must be turned in via
8395 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
8396 /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
8397 /// successfully allocate buffers.
8398 ///
8399 /// All table fields are currently required.
8400 ///
8401 /// + request `rights_attenuation_mask` The rights bits that are zero in
8402 /// this mask will be absent in the buffer VMO rights obtainable via the
8403 /// client end of `token_request`. This allows an initiator or
8404 /// intermediary participant to attenuate the rights available to a
8405 /// delegate participant. This does not allow a participant to gain rights
8406 /// that the participant doesn't already have. The value
8407 /// `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no attenuation
8408 /// should be applied.
8409 /// + These values for rights_attenuation_mask result in no attenuation:
8410 /// + `ZX_RIGHT_SAME_RIGHTS` (preferred)
8411 /// + 0xFFFFFFFF (this is reasonable when an attenuation mask is
8412 /// computed)
8413 /// + 0 (deprecated - do not use 0 - an ERROR will go to the log)
8414 /// + request `token_request` is the server end of a `BufferCollectionToken`
8415 /// channel. The client end of this channel acts as another participant in
8416 /// the shared buffer collection.
8417 Duplicate {
8418 payload: BufferCollectionTokenDuplicateRequest,
8419 control_handle: BufferCollectionTokenControlHandle,
8420 },
8421 /// Set this [`fuchsia.sysmem2/BufferCollectionToken`] to dispensable.
8422 ///
8423 /// When the `BufferCollectionToken` is converted to a
8424 /// [`fuchsia.sysmem2/BufferCollection`], the dispensable status applies to
8425 /// the `BufferCollection` also.
8426 ///
8427 /// Normally, if a client closes a [`fuchsia.sysmem2/BufferCollection`]
8428 /// client end without having sent
8429 /// [`fuchsia.sysmem2/BufferCollection.Release`] first, the
    /// `BufferCollection` [`fuchsia.sysmem2/Node`] will fail, which also
8431 /// propagates failure to the parent [`fuchsia.sysmem2/Node`] and so on up
8432 /// to the root `Node`, which fails the whole buffer collection. In
8433 /// contrast, a dispensable `Node` can fail after buffers are allocated
8434 /// without causing failure of its parent in the [`fuchsia.sysmem2/Node`]
    /// hierarchy.
8436 ///
8437 /// The dispensable `Node` participates in constraints aggregation along
8438 /// with its parent before buffer allocation. If the dispensable `Node`
8439 /// fails before buffers are allocated, the failure propagates to the
8440 /// dispensable `Node`'s parent.
8441 ///
8442 /// After buffers are allocated, failure of the dispensable `Node` (or any
8443 /// child of the dispensable `Node`) does not propagate to the dispensable
8444 /// `Node`'s parent. Failure does propagate from a normal child of a
8445 /// dispensable `Node` to the dispensable `Node`. Failure of a child is
8446 /// blocked from reaching its parent if the child is attached using
8447 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or if the child is
8448 /// dispensable and the failure occurred after allocation.
8449 ///
8450 /// A dispensable `Node` can be used in cases where a participant needs to
8451 /// provide constraints, but after buffers are allocated, the participant
8452 /// can fail without causing buffer collection failure from the parent
8453 /// `Node`'s point of view.
8454 ///
8455 /// In contrast, `BufferCollection.AttachToken` can be used to create a
8456 /// `BufferCollectionToken` which does not participate in constraints
8457 /// aggregation with its parent `Node`, and whose failure at any time does
8458 /// not propagate to its parent `Node`, and whose potential delay providing
8459 /// constraints does not prevent the parent `Node` from completing its
8460 /// buffer allocation.
8461 ///
8462 /// An initiator (creator of the root `Node` using
8463 /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`]) may in some
8464 /// scenarios choose to initially use a dispensable `Node` for a first
8465 /// instance of a participant, and then later if the first instance of that
    /// participant fails, a new second instance of that participant may be given
8467 /// a `BufferCollectionToken` created with `AttachToken`.
8468 ///
8469 /// Normally a client will `SetDispensable` on a `BufferCollectionToken`
8470 /// shortly before sending the dispensable `BufferCollectionToken` to a
8471 /// delegate participant. Because `SetDispensable` prevents propagation of
8472 /// child `Node` failure to parent `Node`(s), if the client was relying on
8473 /// noticing child failure via failure of the parent `Node` retained by the
8474 /// client, the client may instead need to notice failure via other means.
8475 /// If other means aren't available/convenient, the client can instead
8476 /// retain the dispensable `Node` and create a child `Node` under that to
8477 /// send to the delegate participant, retaining this `Node` in order to
8478 /// notice failure of the subtree rooted at this `Node` via this `Node`'s
8479 /// ZX_CHANNEL_PEER_CLOSED signal, and take whatever action is appropriate
8480 /// (e.g. starting a new instance of the delegate participant and handing it
8481 /// a `BufferCollectionToken` created using
8482 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or propagate failure
8483 /// and clean up in a client-specific way).
8484 ///
8485 /// While it is possible (and potentially useful) to `SetDispensable` on a
8486 /// direct child of a `BufferCollectionTokenGroup` `Node`, it isn't possible
8487 /// to later replace a failed dispensable `Node` that was a direct child of
8488 /// a `BufferCollectionTokenGroup` with a new token using `AttachToken`
8489 /// (since there's no `AttachToken` on a group). Instead, to enable
8490 /// `AttachToken` replacement in this case, create an additional
8491 /// non-dispensable token that's a direct child of the group and make the
8492 /// existing dispensable token a child of the additional token. This way,
8493 /// the additional token that is a direct child of the group has
8494 /// `BufferCollection.AttachToken` which can be used to replace the failed
8495 /// dispensable token.
8496 ///
8497 /// `SetDispensable` on an already-dispensable token is idempotent.
8498 SetDispensable { control_handle: BufferCollectionTokenControlHandle },
8499 /// Create a logical OR among a set of tokens, called a
8500 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
8501 ///
8502 /// Most sysmem clients and many participants don't need to care about this
8503 /// message or about `BufferCollectionTokenGroup`(s). However, in some cases
8504 /// a participant wants to attempt to include one set of delegate
8505 /// participants, but if constraints don't combine successfully that way,
8506 /// fall back to a different (possibly overlapping) set of delegate
8507 /// participants, and/or fall back to a less demanding strategy (in terms of
    /// how strict the [`fuchsia.sysmem2/BufferCollectionConstraints`] are,
8509 /// across all involved delegate participants). In such cases, a
8510 /// `BufferCollectionTokenGroup` is useful.
8511 ///
8512 /// A `BufferCollectionTokenGroup` is used to create a 1 of N OR among N
8513 /// child [`fuchsia.sysmem2/BufferCollectionToken`](s). The child tokens
8514 /// which are not selected during aggregation will fail (close), which a
8515 /// potential participant should notice when their `BufferCollection`
8516 /// channel client endpoint sees PEER_CLOSED, allowing the participant to
8517 /// clean up the speculative usage that didn't end up happening (this is
    /// similar to a normal `BufferCollection` server end closing on failure to
8519 /// allocate a logical buffer collection or later async failure of a buffer
8520 /// collection).
8521 ///
8522 /// See comments on protocol `BufferCollectionTokenGroup`.
8523 ///
8524 /// Any `rights_attenuation_mask` or `AttachToken`/`SetDispensable` to be
8525 /// applied to the whole group can be achieved with a
8526 /// `BufferCollectionToken` for this purpose as a direct parent of the
8527 /// `BufferCollectionTokenGroup`.
8528 ///
8529 /// All table fields are currently required.
8530 ///
8531 /// + request `group_request` The server end of a
8532 /// `BufferCollectionTokenGroup` channel to be served by sysmem.
8533 CreateBufferCollectionTokenGroup {
8534 payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
8535 control_handle: BufferCollectionTokenControlHandle,
8536 },
8537 /// An interaction was received which does not match any known method.
8538 #[non_exhaustive]
8539 _UnknownMethod {
8540 /// Ordinal of the method that was called.
8541 ordinal: u64,
8542 control_handle: BufferCollectionTokenControlHandle,
8543 method_type: fidl::MethodType,
8544 },
8545}
8546
8547impl BufferCollectionTokenRequest {
8548 #[allow(irrefutable_let_patterns)]
8549 pub fn into_sync(self) -> Option<(BufferCollectionTokenSyncResponder)> {
8550 if let BufferCollectionTokenRequest::Sync { responder } = self {
8551 Some((responder))
8552 } else {
8553 None
8554 }
8555 }
8556
8557 #[allow(irrefutable_let_patterns)]
8558 pub fn into_release(self) -> Option<(BufferCollectionTokenControlHandle)> {
8559 if let BufferCollectionTokenRequest::Release { control_handle } = self {
8560 Some((control_handle))
8561 } else {
8562 None
8563 }
8564 }
8565
8566 #[allow(irrefutable_let_patterns)]
8567 pub fn into_set_name(self) -> Option<(NodeSetNameRequest, BufferCollectionTokenControlHandle)> {
8568 if let BufferCollectionTokenRequest::SetName { payload, control_handle } = self {
8569 Some((payload, control_handle))
8570 } else {
8571 None
8572 }
8573 }
8574
8575 #[allow(irrefutable_let_patterns)]
8576 pub fn into_set_debug_client_info(
8577 self,
8578 ) -> Option<(NodeSetDebugClientInfoRequest, BufferCollectionTokenControlHandle)> {
8579 if let BufferCollectionTokenRequest::SetDebugClientInfo { payload, control_handle } = self {
8580 Some((payload, control_handle))
8581 } else {
8582 None
8583 }
8584 }
8585
8586 #[allow(irrefutable_let_patterns)]
8587 pub fn into_set_debug_timeout_log_deadline(
8588 self,
8589 ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, BufferCollectionTokenControlHandle)> {
8590 if let BufferCollectionTokenRequest::SetDebugTimeoutLogDeadline {
8591 payload,
8592 control_handle,
8593 } = self
8594 {
8595 Some((payload, control_handle))
8596 } else {
8597 None
8598 }
8599 }
8600
8601 #[allow(irrefutable_let_patterns)]
8602 pub fn into_set_verbose_logging(self) -> Option<(BufferCollectionTokenControlHandle)> {
8603 if let BufferCollectionTokenRequest::SetVerboseLogging { control_handle } = self {
8604 Some((control_handle))
8605 } else {
8606 None
8607 }
8608 }
8609
8610 #[allow(irrefutable_let_patterns)]
8611 pub fn into_get_node_ref(self) -> Option<(BufferCollectionTokenGetNodeRefResponder)> {
8612 if let BufferCollectionTokenRequest::GetNodeRef { responder } = self {
8613 Some((responder))
8614 } else {
8615 None
8616 }
8617 }
8618
8619 #[allow(irrefutable_let_patterns)]
8620 pub fn into_is_alternate_for(
8621 self,
8622 ) -> Option<(NodeIsAlternateForRequest, BufferCollectionTokenIsAlternateForResponder)> {
8623 if let BufferCollectionTokenRequest::IsAlternateFor { payload, responder } = self {
8624 Some((payload, responder))
8625 } else {
8626 None
8627 }
8628 }
8629
8630 #[allow(irrefutable_let_patterns)]
8631 pub fn into_get_buffer_collection_id(
8632 self,
8633 ) -> Option<(BufferCollectionTokenGetBufferCollectionIdResponder)> {
8634 if let BufferCollectionTokenRequest::GetBufferCollectionId { responder } = self {
8635 Some((responder))
8636 } else {
8637 None
8638 }
8639 }
8640
8641 #[allow(irrefutable_let_patterns)]
8642 pub fn into_set_weak(self) -> Option<(BufferCollectionTokenControlHandle)> {
8643 if let BufferCollectionTokenRequest::SetWeak { control_handle } = self {
8644 Some((control_handle))
8645 } else {
8646 None
8647 }
8648 }
8649
8650 #[allow(irrefutable_let_patterns)]
8651 pub fn into_set_weak_ok(
8652 self,
8653 ) -> Option<(NodeSetWeakOkRequest, BufferCollectionTokenControlHandle)> {
8654 if let BufferCollectionTokenRequest::SetWeakOk { payload, control_handle } = self {
8655 Some((payload, control_handle))
8656 } else {
8657 None
8658 }
8659 }
8660
8661 #[allow(irrefutable_let_patterns)]
8662 pub fn into_attach_node_tracking(
8663 self,
8664 ) -> Option<(NodeAttachNodeTrackingRequest, BufferCollectionTokenControlHandle)> {
8665 if let BufferCollectionTokenRequest::AttachNodeTracking { payload, control_handle } = self {
8666 Some((payload, control_handle))
8667 } else {
8668 None
8669 }
8670 }
8671
8672 #[allow(irrefutable_let_patterns)]
8673 pub fn into_duplicate_sync(
8674 self,
8675 ) -> Option<(
8676 BufferCollectionTokenDuplicateSyncRequest,
8677 BufferCollectionTokenDuplicateSyncResponder,
8678 )> {
8679 if let BufferCollectionTokenRequest::DuplicateSync { payload, responder } = self {
8680 Some((payload, responder))
8681 } else {
8682 None
8683 }
8684 }
8685
8686 #[allow(irrefutable_let_patterns)]
8687 pub fn into_duplicate(
8688 self,
8689 ) -> Option<(BufferCollectionTokenDuplicateRequest, BufferCollectionTokenControlHandle)> {
8690 if let BufferCollectionTokenRequest::Duplicate { payload, control_handle } = self {
8691 Some((payload, control_handle))
8692 } else {
8693 None
8694 }
8695 }
8696
8697 #[allow(irrefutable_let_patterns)]
8698 pub fn into_set_dispensable(self) -> Option<(BufferCollectionTokenControlHandle)> {
8699 if let BufferCollectionTokenRequest::SetDispensable { control_handle } = self {
8700 Some((control_handle))
8701 } else {
8702 None
8703 }
8704 }
8705
8706 #[allow(irrefutable_let_patterns)]
8707 pub fn into_create_buffer_collection_token_group(
8708 self,
8709 ) -> Option<(
8710 BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
8711 BufferCollectionTokenControlHandle,
8712 )> {
8713 if let BufferCollectionTokenRequest::CreateBufferCollectionTokenGroup {
8714 payload,
8715 control_handle,
8716 } = self
8717 {
8718 Some((payload, control_handle))
8719 } else {
8720 None
8721 }
8722 }
8723
8724 /// Name of the method defined in FIDL
8725 pub fn method_name(&self) -> &'static str {
8726 match *self {
8727 BufferCollectionTokenRequest::Sync { .. } => "sync",
8728 BufferCollectionTokenRequest::Release { .. } => "release",
8729 BufferCollectionTokenRequest::SetName { .. } => "set_name",
8730 BufferCollectionTokenRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
8731 BufferCollectionTokenRequest::SetDebugTimeoutLogDeadline { .. } => {
8732 "set_debug_timeout_log_deadline"
8733 }
8734 BufferCollectionTokenRequest::SetVerboseLogging { .. } => "set_verbose_logging",
8735 BufferCollectionTokenRequest::GetNodeRef { .. } => "get_node_ref",
8736 BufferCollectionTokenRequest::IsAlternateFor { .. } => "is_alternate_for",
8737 BufferCollectionTokenRequest::GetBufferCollectionId { .. } => {
8738 "get_buffer_collection_id"
8739 }
8740 BufferCollectionTokenRequest::SetWeak { .. } => "set_weak",
8741 BufferCollectionTokenRequest::SetWeakOk { .. } => "set_weak_ok",
8742 BufferCollectionTokenRequest::AttachNodeTracking { .. } => "attach_node_tracking",
8743 BufferCollectionTokenRequest::DuplicateSync { .. } => "duplicate_sync",
8744 BufferCollectionTokenRequest::Duplicate { .. } => "duplicate",
8745 BufferCollectionTokenRequest::SetDispensable { .. } => "set_dispensable",
8746 BufferCollectionTokenRequest::CreateBufferCollectionTokenGroup { .. } => {
8747 "create_buffer_collection_token_group"
8748 }
8749 BufferCollectionTokenRequest::_UnknownMethod {
8750 method_type: fidl::MethodType::OneWay,
8751 ..
8752 } => "unknown one-way method",
8753 BufferCollectionTokenRequest::_UnknownMethod {
8754 method_type: fidl::MethodType::TwoWay,
8755 ..
8756 } => "unknown two-way method",
8757 }
8758 }
8759}
8760
/// Server-side control handle for a `BufferCollectionToken` connection.
/// Clones share the same underlying channel state via the inner `Arc`.
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenControlHandle {
    // Shared serve state for the channel; all clones and responders point here.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
8765
impl fidl::endpoints::ControlHandle for BufferCollectionTokenControlHandle {
    // Delegates channel shutdown to the shared `ServeInner`.
    fn shutdown(&self) {
        self.inner.shutdown()
    }

    // Shuts down after writing the given epitaph status to the channel.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Raw zircon signaling on the peer; only meaningful on Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
8792
// Intentionally empty inherent impl: fidlgen generated no additional
// methods (e.g. event senders) for this protocol's control handle.
impl BufferCollectionTokenControlHandle {}
8794
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenSyncResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle exactly
    // once without also running this responder's `Drop` (which shuts down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id passed back in `send_raw` to pair response with request.
    tx_id: u32,
}
8801
/// Sets the channel to be shutdown (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
8812
impl fidl::endpoints::Responder for BufferCollectionTokenSyncResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    // Borrows the control handle without consuming the responder.
    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
8827
8828impl BufferCollectionTokenSyncResponder {
8829 /// Sends a response to the FIDL transaction.
8830 ///
8831 /// Sets the channel to shutdown if an error occurs.
8832 pub fn send(self) -> Result<(), fidl::Error> {
8833 let _result = self.send_raw();
8834 if _result.is_err() {
8835 self.control_handle.shutdown();
8836 }
8837 self.drop_without_shutdown();
8838 _result
8839 }
8840
8841 /// Similar to "send" but does not shutdown the channel if an error occurs.
8842 pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
8843 let _result = self.send_raw();
8844 self.drop_without_shutdown();
8845 _result
8846 }
8847
8848 fn send_raw(&self) -> Result<(), fidl::Error> {
8849 self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
8850 fidl::encoding::Flexible::new(()),
8851 self.tx_id,
8852 0x11ac2555cf575b54,
8853 fidl::encoding::DynamicFlags::FLEXIBLE,
8854 )
8855 }
8856}
8857
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGetNodeRefResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle exactly
    // once without also running this responder's `Drop` (which shuts down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id passed back in `send_raw` to pair response with request.
    tx_id: u32,
}
8864
/// Sets the channel to be shutdown (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGetNodeRefResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
8875
impl fidl::endpoints::Responder for BufferCollectionTokenGetNodeRefResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    // Borrows the control handle without consuming the responder.
    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
8890
8891impl BufferCollectionTokenGetNodeRefResponder {
8892 /// Sends a response to the FIDL transaction.
8893 ///
8894 /// Sets the channel to shutdown if an error occurs.
8895 pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
8896 let _result = self.send_raw(payload);
8897 if _result.is_err() {
8898 self.control_handle.shutdown();
8899 }
8900 self.drop_without_shutdown();
8901 _result
8902 }
8903
8904 /// Similar to "send" but does not shutdown the channel if an error occurs.
8905 pub fn send_no_shutdown_on_err(
8906 self,
8907 mut payload: NodeGetNodeRefResponse,
8908 ) -> Result<(), fidl::Error> {
8909 let _result = self.send_raw(payload);
8910 self.drop_without_shutdown();
8911 _result
8912 }
8913
8914 fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
8915 self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
8916 fidl::encoding::Flexible::new(&mut payload),
8917 self.tx_id,
8918 0x5b3d0e51614df053,
8919 fidl::encoding::DynamicFlags::FLEXIBLE,
8920 )
8921 }
8922}
8923
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenIsAlternateForResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle exactly
    // once without also running this responder's `Drop` (which shuts down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id passed back in `send_raw` to pair response with request.
    tx_id: u32,
}
8930
/// Sets the channel to be shutdown (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenIsAlternateForResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
8941
impl fidl::endpoints::Responder for BufferCollectionTokenIsAlternateForResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    // Borrows the control handle without consuming the responder.
    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
8956
8957impl BufferCollectionTokenIsAlternateForResponder {
8958 /// Sends a response to the FIDL transaction.
8959 ///
8960 /// Sets the channel to shutdown if an error occurs.
8961 pub fn send(
8962 self,
8963 mut result: Result<&NodeIsAlternateForResponse, Error>,
8964 ) -> Result<(), fidl::Error> {
8965 let _result = self.send_raw(result);
8966 if _result.is_err() {
8967 self.control_handle.shutdown();
8968 }
8969 self.drop_without_shutdown();
8970 _result
8971 }
8972
8973 /// Similar to "send" but does not shutdown the channel if an error occurs.
8974 pub fn send_no_shutdown_on_err(
8975 self,
8976 mut result: Result<&NodeIsAlternateForResponse, Error>,
8977 ) -> Result<(), fidl::Error> {
8978 let _result = self.send_raw(result);
8979 self.drop_without_shutdown();
8980 _result
8981 }
8982
8983 fn send_raw(
8984 &self,
8985 mut result: Result<&NodeIsAlternateForResponse, Error>,
8986 ) -> Result<(), fidl::Error> {
8987 self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
8988 NodeIsAlternateForResponse,
8989 Error,
8990 >>(
8991 fidl::encoding::FlexibleResult::new(result),
8992 self.tx_id,
8993 0x3a58e00157e0825,
8994 fidl::encoding::DynamicFlags::FLEXIBLE,
8995 )
8996 }
8997}
8998
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGetBufferCollectionIdResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle exactly
    // once without also running this responder's `Drop` (which shuts down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id passed back in `send_raw` to pair response with request.
    tx_id: u32,
}
9005
/// Sets the channel to be shutdown (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGetBufferCollectionIdResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
9016
impl fidl::endpoints::Responder for BufferCollectionTokenGetBufferCollectionIdResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    // Borrows the control handle without consuming the responder.
    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
9031
9032impl BufferCollectionTokenGetBufferCollectionIdResponder {
9033 /// Sends a response to the FIDL transaction.
9034 ///
9035 /// Sets the channel to shutdown if an error occurs.
9036 pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
9037 let _result = self.send_raw(payload);
9038 if _result.is_err() {
9039 self.control_handle.shutdown();
9040 }
9041 self.drop_without_shutdown();
9042 _result
9043 }
9044
9045 /// Similar to "send" but does not shutdown the channel if an error occurs.
9046 pub fn send_no_shutdown_on_err(
9047 self,
9048 mut payload: &NodeGetBufferCollectionIdResponse,
9049 ) -> Result<(), fidl::Error> {
9050 let _result = self.send_raw(payload);
9051 self.drop_without_shutdown();
9052 _result
9053 }
9054
9055 fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
9056 self.control_handle
9057 .inner
9058 .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
9059 fidl::encoding::Flexible::new(payload),
9060 self.tx_id,
9061 0x77d19a494b78ba8c,
9062 fidl::encoding::DynamicFlags::FLEXIBLE,
9063 )
9064 }
9065}
9066
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenDuplicateSyncResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle exactly
    // once without also running this responder's `Drop` (which shuts down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id passed back in `send_raw` to pair response with request.
    tx_id: u32,
}
9073
/// Sets the channel to be shutdown (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenDuplicateSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
9084
impl fidl::endpoints::Responder for BufferCollectionTokenDuplicateSyncResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    // Borrows the control handle without consuming the responder.
    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
9099
9100impl BufferCollectionTokenDuplicateSyncResponder {
9101 /// Sends a response to the FIDL transaction.
9102 ///
9103 /// Sets the channel to shutdown if an error occurs.
9104 pub fn send(
9105 self,
9106 mut payload: BufferCollectionTokenDuplicateSyncResponse,
9107 ) -> Result<(), fidl::Error> {
9108 let _result = self.send_raw(payload);
9109 if _result.is_err() {
9110 self.control_handle.shutdown();
9111 }
9112 self.drop_without_shutdown();
9113 _result
9114 }
9115
9116 /// Similar to "send" but does not shutdown the channel if an error occurs.
9117 pub fn send_no_shutdown_on_err(
9118 self,
9119 mut payload: BufferCollectionTokenDuplicateSyncResponse,
9120 ) -> Result<(), fidl::Error> {
9121 let _result = self.send_raw(payload);
9122 self.drop_without_shutdown();
9123 _result
9124 }
9125
9126 fn send_raw(
9127 &self,
9128 mut payload: BufferCollectionTokenDuplicateSyncResponse,
9129 ) -> Result<(), fidl::Error> {
9130 self.control_handle.inner.send::<fidl::encoding::FlexibleType<
9131 BufferCollectionTokenDuplicateSyncResponse,
9132 >>(
9133 fidl::encoding::Flexible::new(&mut payload),
9134 self.tx_id,
9135 0x1c1af9919d1ca45c,
9136 fidl::encoding::DynamicFlags::FLEXIBLE,
9137 )
9138 }
9139}
9140
/// Zero-sized marker type identifying the `BufferCollectionTokenGroup` protocol.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct BufferCollectionTokenGroupMarker;
9143
impl fidl::endpoints::ProtocolMarker for BufferCollectionTokenGroupMarker {
    type Proxy = BufferCollectionTokenGroupProxy;
    type RequestStream = BufferCollectionTokenGroupRequestStream;
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = BufferCollectionTokenGroupSynchronousProxy;

    // "(anonymous)" prefix: presumably the protocol is not @discoverable -- TODO confirm.
    const DEBUG_NAME: &'static str = "(anonymous) BufferCollectionTokenGroup";
}
9152
/// Client-side interface for `BufferCollectionTokenGroup`, implemented by both
/// the async proxy and test fakes. One-way methods return `Result` directly;
/// two-way methods return an associated future type.
pub trait BufferCollectionTokenGroupProxyInterface: Send + Sync {
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
    fn r#create_child(
        &self,
        payload: BufferCollectionTokenGroupCreateChildRequest,
    ) -> Result<(), fidl::Error>;
    type CreateChildrenSyncResponseFut: std::future::Future<
            Output = Result<BufferCollectionTokenGroupCreateChildrenSyncResponse, fidl::Error>,
        > + Send;
    fn r#create_children_sync(
        &self,
        payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
    ) -> Self::CreateChildrenSyncResponseFut;
    fn r#all_children_present(&self) -> Result<(), fidl::Error>;
}
9198#[derive(Debug)]
/// Blocking (synchronous) client for `BufferCollectionTokenGroup`;
/// only compiled for Fuchsia targets.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct BufferCollectionTokenGroupSynchronousProxy {
    client: fidl::client::sync::Client,
}
9203
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for BufferCollectionTokenGroupSynchronousProxy {
    type Proxy = BufferCollectionTokenGroupProxy;
    type Protocol = BufferCollectionTokenGroupMarker;

    // Wraps a raw channel; delegates to the inherent `new` constructor.
    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    // Recovers the underlying channel, consuming the proxy.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
9221
9222#[cfg(target_os = "fuchsia")]
9223impl BufferCollectionTokenGroupSynchronousProxy {
9224 pub fn new(channel: fidl::Channel) -> Self {
9225 let protocol_name =
9226 <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
9227 Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
9228 }
9229
9230 pub fn into_channel(self) -> fidl::Channel {
9231 self.client.into_channel()
9232 }
9233
9234 /// Waits until an event arrives and returns it. It is safe for other
9235 /// threads to make concurrent requests while waiting for an event.
9236 pub fn wait_for_event(
9237 &self,
9238 deadline: zx::MonotonicInstant,
9239 ) -> Result<BufferCollectionTokenGroupEvent, fidl::Error> {
9240 BufferCollectionTokenGroupEvent::decode(self.client.wait_for_event(deadline)?)
9241 }
9242
9243 /// Ensure that previous messages have been received server side. This is
9244 /// particularly useful after previous messages that created new tokens,
9245 /// because a token must be known to the sysmem server before sending the
9246 /// token to another participant.
9247 ///
9248 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
9249 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
9250 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
9251 /// to mitigate the possibility of a hostile/fake
9252 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
9253 /// Another way is to pass the token to
9254 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
9255 /// the token as part of exchanging it for a
9256 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
9257 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
9258 /// of stalling.
9259 ///
9260 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
9261 /// and then starting and completing a `Sync`, it's then safe to send the
9262 /// `BufferCollectionToken` client ends to other participants knowing the
9263 /// server will recognize the tokens when they're sent by the other
9264 /// participants to sysmem in a
9265 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
9266 /// efficient way to create tokens while avoiding unnecessary round trips.
9267 ///
9268 /// Other options include waiting for each
9269 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
9270 /// individually (using separate call to `Sync` after each), or calling
9271 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
9272 /// converted to a `BufferCollection` via
9273 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
9274 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
9275 /// the sync step and can create multiple tokens at once.
9276 pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
9277 let _response = self.client.send_query::<
9278 fidl::encoding::EmptyPayload,
9279 fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
9280 >(
9281 (),
9282 0x11ac2555cf575b54,
9283 fidl::encoding::DynamicFlags::FLEXIBLE,
9284 ___deadline,
9285 )?
9286 .into_result::<BufferCollectionTokenGroupMarker>("sync")?;
9287 Ok(_response)
9288 }
9289
9290 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
9291 ///
9292 /// Normally a participant will convert a `BufferCollectionToken` into a
9293 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
9294 /// `Release` via the token (and then close the channel immediately or
9295 /// shortly later in response to server closing the server end), which
9296 /// avoids causing buffer collection failure. Without a prior `Release`,
9297 /// closing the `BufferCollectionToken` client end will cause buffer
9298 /// collection failure.
9299 ///
9300 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
9301 ///
9302 /// By default the server handles unexpected closure of a
9303 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
9304 /// first) by failing the buffer collection. Partly this is to expedite
9305 /// closing VMO handles to reclaim memory when any participant fails. If a
9306 /// participant would like to cleanly close a `BufferCollection` without
9307 /// causing buffer collection failure, the participant can send `Release`
9308 /// before closing the `BufferCollection` client end. The `Release` can
9309 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
9310 /// buffer collection won't require constraints from this node in order to
9311 /// allocate. If after `SetConstraints`, the constraints are retained and
9312 /// aggregated, despite the lack of `BufferCollection` connection at the
9313 /// time of constraints aggregation.
9314 ///
9315 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
9316 ///
9317 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
9318 /// end (without `Release` first) will trigger failure of the buffer
9319 /// collection. To close a `BufferCollectionTokenGroup` channel without
9320 /// failing the buffer collection, ensure that AllChildrenPresent() has been
9321 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
9322 /// client end.
9323 ///
9324 /// If `Release` occurs before
9325 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent], the
9326 /// buffer collection will fail (triggered by reception of `Release` without
9327 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
9328 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
9329 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
9330 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
9331 /// close requires `AllChildrenPresent` (if not already sent), then
9332 /// `Release`, then close client end.
9333 ///
9334 /// If `Release` occurs after `AllChildrenPresent`, the children and all
9335 /// their constraints remain intact (just as they would if the
9336 /// `BufferCollectionTokenGroup` channel had remained open), and the client
9337 /// end close doesn't trigger buffer collection failure.
9338 ///
9339 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
9340 ///
9341 /// For brevity, the per-channel-protocol paragraphs above ignore the
9342 /// separate failure domain created by
9343 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
9344 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
9345 /// unexpectedly closes (without `Release` first) and that client end is
9346 /// under a failure domain, instead of failing the whole buffer collection,
9347 /// the failure domain is failed, but the buffer collection itself is
9348 /// isolated from failure of the failure domain. Such failure domains can be
9349 /// nested, in which case only the inner-most failure domain in which the
9350 /// `Node` resides fails.
9351 pub fn r#release(&self) -> Result<(), fidl::Error> {
9352 self.client.send::<fidl::encoding::EmptyPayload>(
9353 (),
9354 0x6a5cae7d6d6e04c6,
9355 fidl::encoding::DynamicFlags::FLEXIBLE,
9356 )
9357 }
9358
9359 /// Set a name for VMOs in this buffer collection.
9360 ///
9361 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
9362 /// will be truncated to fit. The name of the vmo will be suffixed with the
9363 /// buffer index within the collection (if the suffix fits within
9364 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
9365 /// listed in the inspect data.
9366 ///
9367 /// The name only affects VMOs allocated after the name is set; this call
9368 /// does not rename existing VMOs. If multiple clients set different names
9369 /// then the larger priority value will win. Setting a new name with the
9370 /// same priority as a prior name doesn't change the name.
9371 ///
9372 /// All table fields are currently required.
9373 ///
9374 /// + request `priority` The name is only set if this is the first `SetName`
9375 /// or if `priority` is greater than any previous `priority` value in
9376 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
9377 /// + request `name` The name for VMOs created under this buffer collection.
9378 pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
9379 self.client.send::<NodeSetNameRequest>(
9380 payload,
9381 0xb41f1624f48c1e9,
9382 fidl::encoding::DynamicFlags::FLEXIBLE,
9383 )
9384 }
9385
9386 /// Set information about the current client that can be used by sysmem to
9387 /// help diagnose leaking memory and allocation stalls waiting for a
9388 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
9389 ///
9390 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
9391 /// `Node`(s) derived from this `Node`, unless overriden by
9392 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
9393 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
9394 ///
9395 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
9396 /// `Allocator` is the most efficient way to ensure that all
9397 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
9398 /// set, and is also more efficient than separately sending the same debug
9399 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
9400 /// created [`fuchsia.sysmem2/Node`].
9401 ///
9402 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
9403 /// indicate which client is closing their channel first, leading to subtree
9404 /// failure (which can be normal if the purpose of the subtree is over, but
9405 /// if happening earlier than expected, the client-channel-specific name can
9406 /// help diagnose where the failure is first coming from, from sysmem's
9407 /// point of view).
9408 ///
9409 /// All table fields are currently required.
9410 ///
9411 /// + request `name` This can be an arbitrary string, but the current
9412 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
9413 /// + request `id` This can be an arbitrary id, but the current process ID
9414 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
9415 pub fn r#set_debug_client_info(
9416 &self,
9417 mut payload: &NodeSetDebugClientInfoRequest,
9418 ) -> Result<(), fidl::Error> {
9419 self.client.send::<NodeSetDebugClientInfoRequest>(
9420 payload,
9421 0x5cde8914608d99b1,
9422 fidl::encoding::DynamicFlags::FLEXIBLE,
9423 )
9424 }
9425
9426 /// Sysmem logs a warning if sysmem hasn't seen
9427 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
9428 /// within 5 seconds after creation of a new collection.
9429 ///
9430 /// Clients can call this method to change when the log is printed. If
9431 /// multiple client set the deadline, it's unspecified which deadline will
9432 /// take effect.
9433 ///
9434 /// In most cases the default works well.
9435 ///
9436 /// All table fields are currently required.
9437 ///
9438 /// + request `deadline` The time at which sysmem will start trying to log
9439 /// the warning, unless all constraints are with sysmem by then.
9440 pub fn r#set_debug_timeout_log_deadline(
9441 &self,
9442 mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
9443 ) -> Result<(), fidl::Error> {
9444 self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
9445 payload,
9446 0x716b0af13d5c0806,
9447 fidl::encoding::DynamicFlags::FLEXIBLE,
9448 )
9449 }
9450
9451 /// This enables verbose logging for the buffer collection.
9452 ///
9453 /// Verbose logging includes constraints set via
9454 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
9455 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
9456 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
9457 /// the tree of `Node`(s).
9458 ///
9459 /// Normally sysmem prints only a single line complaint when aggregation
9460 /// fails, with just the specific detailed reason that aggregation failed,
9461 /// with little surrounding context. While this is often enough to diagnose
9462 /// a problem if only a small change was made and everything was working
9463 /// before the small change, it's often not particularly helpful for getting
9464 /// a new buffer collection to work for the first time. Especially with
9465 /// more complex trees of nodes, involving things like
9466 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
9467 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
9468 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
9469 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
9470 /// looks like and why it's failing a logical allocation, or why a tree or
9471 /// subtree is failing sooner than expected.
9472 ///
9473 /// The intent of the extra logging is to be acceptable from a performance
9474 /// point of view, under the assumption that verbose logging is only enabled
9475 /// on a low number of buffer collections. If we're not tracking down a bug,
9476 /// we shouldn't send this message.
9477 pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
9478 self.client.send::<fidl::encoding::EmptyPayload>(
9479 (),
9480 0x5209c77415b4dfad,
9481 fidl::encoding::DynamicFlags::FLEXIBLE,
9482 )
9483 }
9484
9485 /// This gets a handle that can be used as a parameter to
9486 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
9487 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
9488 /// client obtained this handle from this `Node`.
9489 ///
9490 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
9491 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
9492 /// despite the two calls typically being on different channels.
9493 ///
9494 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
9495 ///
9496 /// All table fields are currently required.
9497 ///
9498 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
9499 /// different `Node` channel, to prove that the client obtained the handle
9500 /// from this `Node`.
9501 pub fn r#get_node_ref(
9502 &self,
9503 ___deadline: zx::MonotonicInstant,
9504 ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
9505 let _response = self.client.send_query::<
9506 fidl::encoding::EmptyPayload,
9507 fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
9508 >(
9509 (),
9510 0x5b3d0e51614df053,
9511 fidl::encoding::DynamicFlags::FLEXIBLE,
9512 ___deadline,
9513 )?
9514 .into_result::<BufferCollectionTokenGroupMarker>("get_node_ref")?;
9515 Ok(_response)
9516 }
9517
9518 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
9519 /// rooted at a different child token of a common parent
9520 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
9521 /// passed-in `node_ref`.
9522 ///
9523 /// This call is for assisting with admission control de-duplication, and
9524 /// with debugging.
9525 ///
9526 /// The `node_ref` must be obtained using
9527 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
9528 ///
9529 /// The `node_ref` can be a duplicated handle; it's not necessary to call
9530 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
9531 ///
9532 /// If a calling token may not actually be a valid token at all due to a
9533 /// potentially hostile/untrusted provider of the token, call
9534 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
9535 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
9536 /// never responds due to a calling token not being a real token (not really
9537 /// talking to sysmem). Another option is to call
9538 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
9539 /// which also validates the token along with converting it to a
9540 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
9541 ///
9542 /// All table fields are currently required.
9543 ///
9544 /// - response `is_alternate`
9545 /// - true: The first parent node in common between the calling node and
9546 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
9547 /// that the calling `Node` and the `node_ref` `Node` will not have both
9548 /// their constraints apply - rather sysmem will choose one or the other
9549 /// of the constraints - never both. This is because only one child of
9550 /// a `BufferCollectionTokenGroup` is selected during logical
9551 /// allocation, with only that one child's subtree contributing to
9552 /// constraints aggregation.
9553 /// - false: The first parent node in common between the calling `Node`
9554 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
9555 /// Currently, this means the first parent node in common is a
9556 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
9557 /// `Release`ed). This means that the calling `Node` and the `node_ref`
9558 /// `Node` may have both their constraints apply during constraints
9559 /// aggregation of the logical allocation, if both `Node`(s) are
9560 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
9561 /// this case, there is no `BufferCollectionTokenGroup` that will
9562 /// directly prevent the two `Node`(s) from both being selected and
9563 /// their constraints both aggregated, but even when false, one or both
9564 /// `Node`(s) may still be eliminated from consideration if one or both
9565 /// `Node`(s) has a direct or indirect parent
9566 /// `BufferCollectionTokenGroup` which selects a child subtree other
9567 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
9568 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
9569 /// associated with the same buffer collection as the calling `Node`.
9570 /// Another reason for this error is if the `node_ref` is an
9571 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
9572 /// a real `node_ref` obtained from `GetNodeRef`.
9573 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
9575 /// the needed rights expected on a real `node_ref`.
9576 /// * No other failing status codes are returned by this call. However,
9577 /// sysmem may add additional codes in future, so the client should have
9578 /// sensible default handling for any failing status code.
9579 pub fn r#is_alternate_for(
9580 &self,
9581 mut payload: NodeIsAlternateForRequest,
9582 ___deadline: zx::MonotonicInstant,
9583 ) -> Result<NodeIsAlternateForResult, fidl::Error> {
9584 let _response = self.client.send_query::<
9585 NodeIsAlternateForRequest,
9586 fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
9587 >(
9588 &mut payload,
9589 0x3a58e00157e0825,
9590 fidl::encoding::DynamicFlags::FLEXIBLE,
9591 ___deadline,
9592 )?
9593 .into_result::<BufferCollectionTokenGroupMarker>("is_alternate_for")?;
9594 Ok(_response.map(|x| x))
9595 }
9596
9597 /// Get the buffer collection ID. This ID is also available from
9598 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
9599 /// within the collection).
9600 ///
9601 /// This call is mainly useful in situations where we can't convey a
9602 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
9603 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
9604 /// handle, which can be joined back up with a `BufferCollection` client end
9605 /// that was created via a different path. Prefer to convey a
9606 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
9607 ///
9608 /// Trusting a `buffer_collection_id` value from a source other than sysmem
9609 /// is analogous to trusting a koid value from a source other than zircon.
9610 /// Both should be avoided unless really necessary, and both require
9611 /// caution. In some situations it may be reasonable to refer to a
9612 /// pre-established `BufferCollection` by `buffer_collection_id` via a
9613 /// protocol for efficiency reasons, but an incoming value purporting to be
9614 /// a `buffer_collection_id` is not sufficient alone to justify granting the
9615 /// sender of the `buffer_collection_id` any capability. The sender must
9616 /// first prove to a receiver that the sender has/had a VMO or has/had a
9617 /// `BufferCollectionToken` to the same collection by sending a handle that
9618 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
9619 /// `buffer_collection_id` value. The receiver should take care to avoid
9620 /// assuming that a sender had a `BufferCollectionToken` in cases where the
9621 /// sender has only proven that the sender had a VMO.
9622 ///
9623 /// - response `buffer_collection_id` This ID is unique per buffer
9624 /// collection per boot. Each buffer is uniquely identified by the
9625 /// `buffer_collection_id` and `buffer_index` together.
9626 pub fn r#get_buffer_collection_id(
9627 &self,
9628 ___deadline: zx::MonotonicInstant,
9629 ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
9630 let _response = self.client.send_query::<
9631 fidl::encoding::EmptyPayload,
9632 fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
9633 >(
9634 (),
9635 0x77d19a494b78ba8c,
9636 fidl::encoding::DynamicFlags::FLEXIBLE,
9637 ___deadline,
9638 )?
9639 .into_result::<BufferCollectionTokenGroupMarker>("get_buffer_collection_id")?;
9640 Ok(_response)
9641 }
9642
9643 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
9644 /// created after this message to weak, which means that a client's `Node`
9645 /// client end (or a child created after this message) is not alone
9646 /// sufficient to keep allocated VMOs alive.
9647 ///
9648 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
9649 /// `close_weak_asap`.
9650 ///
9651 /// This message is only permitted before the `Node` becomes ready for
9652 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
9653 /// * `BufferCollectionToken`: any time
9654 /// * `BufferCollection`: before `SetConstraints`
9655 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
9656 ///
9657 /// Currently, no conversion from strong `Node` to weak `Node` after ready
9658 /// for allocation is provided, but a client can simulate that by creating
9659 /// an additional `Node` before allocation and setting that additional
9660 /// `Node` to weak, and then potentially at some point later sending
9661 /// `Release` and closing the client end of the client's strong `Node`, but
9662 /// keeping the client's weak `Node`.
9663 ///
9664 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
9665 /// collection failure (all `Node` client end(s) will see
9666 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
9667 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
9668 /// this situation until all `Node`(s) are ready for allocation. For initial
9669 /// allocation to succeed, at least one strong `Node` is required to exist
9670 /// at allocation time, but after that client receives VMO handles, that
9671 /// client can `BufferCollection.Release` and close the client end without
9672 /// causing this type of failure.
9673 ///
9674 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
9675 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
9676 /// separately as appropriate.
9677 pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
9678 self.client.send::<fidl::encoding::EmptyPayload>(
9679 (),
9680 0x22dd3ea514eeffe1,
9681 fidl::encoding::DynamicFlags::FLEXIBLE,
9682 )
9683 }
9684
9685 /// This indicates to sysmem that the client is prepared to pay attention to
9686 /// `close_weak_asap`.
9687 ///
9688 /// If sent, this message must be before
9689 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
9690 ///
9691 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
9692 /// send this message before `WaitForAllBuffersAllocated`, or a parent
9693 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
9694 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
9695 /// trigger buffer collection failure.
9696 ///
9697 /// This message is necessary because weak sysmem VMOs have not always been
9698 /// a thing, so older clients are not aware of the need to pay attention to
9699 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
9700 /// sysmem weak VMO handles asap. By having this message and requiring
9701 /// participants to indicate their acceptance of this aspect of the overall
9702 /// protocol, we avoid situations where an older client is delivered a weak
9703 /// VMO without any way for sysmem to get that VMO to close quickly later
9704 /// (and on a per-buffer basis).
9705 ///
9706 /// A participant that doesn't handle `close_weak_asap` and also doesn't
9707 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
9708 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
9709 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
9710 /// same participant has a child/delegate which does retrieve VMOs, that
9711 /// child/delegate will need to send `SetWeakOk` before
9712 /// `WaitForAllBuffersAllocated`.
9713 ///
9714 /// + request `for_child_nodes_also` If present and true, this means direct
9715 /// child nodes of this node created after this message plus all
9716 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
9717 /// those nodes. Any child node of this node that was created before this
9718 /// message is not included. This setting is "sticky" in the sense that a
9719 /// subsequent `SetWeakOk` without this bool set to true does not reset
9720 /// the server-side bool. If this creates a problem for a participant, a
9721 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
9722 /// tokens instead, as appropriate. A participant should only set
9723 /// `for_child_nodes_also` true if the participant can really promise to
9724 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
9725 /// weak VMO handles held by participants holding the corresponding child
9726 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
9727 /// which are using sysmem(1) can be weak, despite the clients of those
9728 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
9729 /// direct way to find out about `close_weak_asap`. This only applies to
9730 /// descendents of this `Node` which are using sysmem(1), not to this
9731 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
9732 /// token, which will fail allocation unless an ancestor of this `Node`
9733 /// specified `for_child_nodes_also` true.
9734 pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
9735 self.client.send::<NodeSetWeakOkRequest>(
9736 &mut payload,
9737 0x38a44fc4d7724be9,
9738 fidl::encoding::DynamicFlags::FLEXIBLE,
9739 )
9740 }
9741
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
9744 /// reservation by a different `Node` via
9745 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
9746 ///
9747 /// The `Node` buffer counts may not be released until the entire tree of
9748 /// `Node`(s) is closed or failed, because
9749 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
9750 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
9751 /// `Node` buffer counts remain reserved until the orphaned node is later
9752 /// cleaned up.
9753 ///
9754 /// If the `Node` exceeds a fairly large number of attached eventpair server
9755 /// ends, a log message will indicate this and the `Node` (and the
9756 /// appropriate) sub-tree will fail.
9757 ///
9758 /// The `server_end` will remain open when
9759 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
9760 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
9761 /// [`fuchsia.sysmem2/BufferCollection`].
9762 ///
9763 /// This message can also be used with a
9764 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
9765 pub fn r#attach_node_tracking(
9766 &self,
9767 mut payload: NodeAttachNodeTrackingRequest,
9768 ) -> Result<(), fidl::Error> {
9769 self.client.send::<NodeAttachNodeTrackingRequest>(
9770 &mut payload,
9771 0x3f22f2a293d3cdac,
9772 fidl::encoding::DynamicFlags::FLEXIBLE,
9773 )
9774 }
9775
9776 /// Create a child [`fuchsia.sysmem2/BufferCollectionToken`]. Only one child
9777 /// (including its children) will be selected during allocation (or logical
9778 /// allocation).
9779 ///
9780 /// Before passing the client end of this token to
9781 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], completion of
9782 /// [`fuchsia.sysmem2/Node.Sync`] after
9783 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] is required.
9784 /// Or the client can use
9785 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`] which
9786 /// essentially includes the `Sync`.
9787 ///
9788 /// Sending CreateChild after AllChildrenPresent is not permitted; this will
9789 /// fail the group's subtree and close the connection.
9790 ///
9791 /// After all children have been created, send AllChildrenPresent.
9792 ///
9793 /// + request `token_request` The server end of the new token channel.
9794 /// + request `rights_attenuation_mask` If ZX_RIGHT_SAME_RIGHTS, the created
9795 /// token allows the holder to get the same rights to buffers as the
9796 /// parent token (of the group) had. When the value isn't
    ///   ZX_RIGHT_SAME_RIGHTS, the value is interpreted as a bitmask with 0
    ///   bits ensuring those rights are attenuated, so 0xFFFFFFFF is a synonym
9799 /// for ZX_RIGHT_SAME_RIGHTS. The value 0 is not allowed and intentionally
9800 /// causes subtree failure.
9801 pub fn r#create_child(
9802 &self,
9803 mut payload: BufferCollectionTokenGroupCreateChildRequest,
9804 ) -> Result<(), fidl::Error> {
9805 self.client.send::<BufferCollectionTokenGroupCreateChildRequest>(
9806 &mut payload,
9807 0x41a0075d419f30c5,
9808 fidl::encoding::DynamicFlags::FLEXIBLE,
9809 )
9810 }
9811
9812 /// Create 1 or more child tokens at once, synchronously. In contrast to
9813 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`], no
9814 /// [`fuchsia.sysmem2/Node.Sync`] is required before passing the client end
9815 /// of a returned token to
9816 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`].
9817 ///
9818 /// The lower-index child tokens are higher priority (attempted sooner) than
9819 /// higher-index child tokens.
9820 ///
9821 /// As per all child tokens, successful aggregation will choose exactly one
9822 /// child among all created children (across all children created across
9823 /// potentially multiple calls to
9824 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] and
9825 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`]).
9826 ///
9827 /// The maximum permissible total number of children per group, and total
9828 /// number of nodes in an overall tree (from the root) are capped to limits
9829 /// which are not configurable via these protocols.
9830 ///
9831 /// Sending CreateChildrenSync after AllChildrenPresent is not permitted;
9832 /// this will fail the group's subtree and close the connection.
9833 ///
9834 /// After all children have been created, send AllChildrenPresent.
9835 ///
9836 /// + request `rights_attentuation_masks` The size of the
9837 /// `rights_attentuation_masks` determines the number of created child
9838 /// tokens. The value ZX_RIGHT_SAME_RIGHTS doesn't attenuate any rights.
9839 /// The value 0xFFFFFFFF is a synonym for ZX_RIGHT_SAME_RIGHTS. For any
9840 /// other value, each 0 bit in the mask attenuates that right.
9841 /// - response `tokens` The created child tokens.
9842 pub fn r#create_children_sync(
9843 &self,
9844 mut payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
9845 ___deadline: zx::MonotonicInstant,
9846 ) -> Result<BufferCollectionTokenGroupCreateChildrenSyncResponse, fidl::Error> {
9847 let _response = self.client.send_query::<
9848 BufferCollectionTokenGroupCreateChildrenSyncRequest,
9849 fidl::encoding::FlexibleType<BufferCollectionTokenGroupCreateChildrenSyncResponse>,
9850 >(
9851 payload,
9852 0x15dea448c536070a,
9853 fidl::encoding::DynamicFlags::FLEXIBLE,
9854 ___deadline,
9855 )?
9856 .into_result::<BufferCollectionTokenGroupMarker>("create_children_sync")?;
9857 Ok(_response)
9858 }
9859
9860 /// Indicate that no more children will be created.
9861 ///
9862 /// After creating all children, the client should send
9863 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`] to
9864 /// inform sysmem that no more children will be created, so that sysmem can
9865 /// know when it's ok to start aggregating constraints.
9866 ///
9867 /// Sending CreateChild after AllChildrenPresent is not permitted; this will
9868 /// fail the group's subtree and close the connection.
9869 ///
9870 /// If [`fuchsia.sysmem2/Node.Release`] is to be sent, it should be sent
9871 /// after `AllChildrenPresent`, else failure of the group's subtree will be
9872 /// triggered. This is intentionally not analogous to how `Release` without
9873 /// prior [`fuchsia.sysmem2/BufferCollection.SetConstraints`] doesn't cause
9874 /// subtree failure.
9875 pub fn r#all_children_present(&self) -> Result<(), fidl::Error> {
9876 self.client.send::<fidl::encoding::EmptyPayload>(
9877 (),
9878 0x5c327e4a23391312,
9879 fidl::encoding::DynamicFlags::FLEXIBLE,
9880 )
9881 }
9882}
9883
#[cfg(target_os = "fuchsia")]
impl From<BufferCollectionTokenGroupSynchronousProxy> for zx::NullableHandle {
    fn from(value: BufferCollectionTokenGroupSynchronousProxy) -> Self {
        // Unwrap the proxy back into its channel, then erase the channel type
        // down to a plain handle.
        let channel = value.into_channel();
        channel.into()
    }
}
9890
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for BufferCollectionTokenGroupSynchronousProxy {
    // Conversion form of `Self::new`, for ergonomic `.into()` call sites.
    fn from(channel: fidl::Channel) -> Self {
        BufferCollectionTokenGroupSynchronousProxy::new(channel)
    }
}
9897
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for BufferCollectionTokenGroupSynchronousProxy {
    type Protocol = BufferCollectionTokenGroupMarker;

    fn from_client(value: fidl::endpoints::ClientEnd<BufferCollectionTokenGroupMarker>) -> Self {
        // A client end is a typed channel; strip the protocol type and wrap it.
        let channel = value.into_channel();
        Self::new(channel)
    }
}
9906
/// Asynchronous client proxy for the `fuchsia.sysmem2/BufferCollectionTokenGroup`
/// protocol, wrapping an async FIDL [`fidl::client::Client`] over the
/// default Fuchsia resource dialect.
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenGroupProxy {
    // Underlying async FIDL client; shared by the trait impls below.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
9911
9912impl fidl::endpoints::Proxy for BufferCollectionTokenGroupProxy {
9913 type Protocol = BufferCollectionTokenGroupMarker;
9914
9915 fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
9916 Self::new(inner)
9917 }
9918
9919 fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
9920 self.client.into_channel().map_err(|client| Self { client })
9921 }
9922
9923 fn as_channel(&self) -> &::fidl::AsyncChannel {
9924 self.client.as_channel()
9925 }
9926}
9927
9928impl BufferCollectionTokenGroupProxy {
9929 /// Create a new Proxy for fuchsia.sysmem2/BufferCollectionTokenGroup.
9930 pub fn new(channel: ::fidl::AsyncChannel) -> Self {
9931 let protocol_name =
9932 <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
9933 Self { client: fidl::client::Client::new(channel, protocol_name) }
9934 }
9935
9936 /// Get a Stream of events from the remote end of the protocol.
9937 ///
9938 /// # Panics
9939 ///
9940 /// Panics if the event stream was already taken.
9941 pub fn take_event_stream(&self) -> BufferCollectionTokenGroupEventStream {
9942 BufferCollectionTokenGroupEventStream { event_receiver: self.client.take_event_receiver() }
9943 }
9944
9945 /// Ensure that previous messages have been received server side. This is
9946 /// particularly useful after previous messages that created new tokens,
9947 /// because a token must be known to the sysmem server before sending the
9948 /// token to another participant.
9949 ///
9950 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
9951 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
9952 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
9953 /// to mitigate the possibility of a hostile/fake
9954 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
9955 /// Another way is to pass the token to
9956 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
9957 /// the token as part of exchanging it for a
9958 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
9959 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
9960 /// of stalling.
9961 ///
9962 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
9963 /// and then starting and completing a `Sync`, it's then safe to send the
9964 /// `BufferCollectionToken` client ends to other participants knowing the
9965 /// server will recognize the tokens when they're sent by the other
9966 /// participants to sysmem in a
9967 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
9968 /// efficient way to create tokens while avoiding unnecessary round trips.
9969 ///
9970 /// Other options include waiting for each
9971 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
9972 /// individually (using separate call to `Sync` after each), or calling
9973 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
9974 /// converted to a `BufferCollection` via
9975 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
9976 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
9977 /// the sync step and can create multiple tokens at once.
9978 pub fn r#sync(
9979 &self,
9980 ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
9981 BufferCollectionTokenGroupProxyInterface::r#sync(self)
9982 }
9983
9984 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
9985 ///
9986 /// Normally a participant will convert a `BufferCollectionToken` into a
9987 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
9988 /// `Release` via the token (and then close the channel immediately or
9989 /// shortly later in response to server closing the server end), which
9990 /// avoids causing buffer collection failure. Without a prior `Release`,
9991 /// closing the `BufferCollectionToken` client end will cause buffer
9992 /// collection failure.
9993 ///
9994 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
9995 ///
9996 /// By default the server handles unexpected closure of a
9997 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
9998 /// first) by failing the buffer collection. Partly this is to expedite
9999 /// closing VMO handles to reclaim memory when any participant fails. If a
10000 /// participant would like to cleanly close a `BufferCollection` without
10001 /// causing buffer collection failure, the participant can send `Release`
10002 /// before closing the `BufferCollection` client end. The `Release` can
10003 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
10004 /// buffer collection won't require constraints from this node in order to
10005 /// allocate. If after `SetConstraints`, the constraints are retained and
10006 /// aggregated, despite the lack of `BufferCollection` connection at the
10007 /// time of constraints aggregation.
10008 ///
10009 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
10010 ///
10011 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
10012 /// end (without `Release` first) will trigger failure of the buffer
10013 /// collection. To close a `BufferCollectionTokenGroup` channel without
10014 /// failing the buffer collection, ensure that AllChildrenPresent() has been
10015 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
10016 /// client end.
10017 ///
10018 /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
10020 /// buffer collection will fail (triggered by reception of `Release` without
10021 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
10022 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
10023 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
10024 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
10025 /// close requires `AllChildrenPresent` (if not already sent), then
10026 /// `Release`, then close client end.
10027 ///
10028 /// If `Release` occurs after `AllChildrenPresent`, the children and all
10029 /// their constraints remain intact (just as they would if the
10030 /// `BufferCollectionTokenGroup` channel had remained open), and the client
10031 /// end close doesn't trigger buffer collection failure.
10032 ///
10033 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
10034 ///
10035 /// For brevity, the per-channel-protocol paragraphs above ignore the
10036 /// separate failure domain created by
10037 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
10038 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
10039 /// unexpectedly closes (without `Release` first) and that client end is
10040 /// under a failure domain, instead of failing the whole buffer collection,
10041 /// the failure domain is failed, but the buffer collection itself is
10042 /// isolated from failure of the failure domain. Such failure domains can be
10043 /// nested, in which case only the inner-most failure domain in which the
10044 /// `Node` resides fails.
10045 pub fn r#release(&self) -> Result<(), fidl::Error> {
10046 BufferCollectionTokenGroupProxyInterface::r#release(self)
10047 }
10048
10049 /// Set a name for VMOs in this buffer collection.
10050 ///
10051 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
10052 /// will be truncated to fit. The name of the vmo will be suffixed with the
10053 /// buffer index within the collection (if the suffix fits within
10054 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
10055 /// listed in the inspect data.
10056 ///
10057 /// The name only affects VMOs allocated after the name is set; this call
10058 /// does not rename existing VMOs. If multiple clients set different names
10059 /// then the larger priority value will win. Setting a new name with the
10060 /// same priority as a prior name doesn't change the name.
10061 ///
10062 /// All table fields are currently required.
10063 ///
10064 /// + request `priority` The name is only set if this is the first `SetName`
10065 /// or if `priority` is greater than any previous `priority` value in
10066 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
10067 /// + request `name` The name for VMOs created under this buffer collection.
10068 pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
10069 BufferCollectionTokenGroupProxyInterface::r#set_name(self, payload)
10070 }
10071
10072 /// Set information about the current client that can be used by sysmem to
10073 /// help diagnose leaking memory and allocation stalls waiting for a
10074 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
10075 ///
10076 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
10078 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
10079 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
10080 ///
10081 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
10082 /// `Allocator` is the most efficient way to ensure that all
10083 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
10084 /// set, and is also more efficient than separately sending the same debug
10085 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
10086 /// created [`fuchsia.sysmem2/Node`].
10087 ///
10088 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
10089 /// indicate which client is closing their channel first, leading to subtree
10090 /// failure (which can be normal if the purpose of the subtree is over, but
10091 /// if happening earlier than expected, the client-channel-specific name can
10092 /// help diagnose where the failure is first coming from, from sysmem's
10093 /// point of view).
10094 ///
10095 /// All table fields are currently required.
10096 ///
10097 /// + request `name` This can be an arbitrary string, but the current
10098 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
10099 /// + request `id` This can be an arbitrary id, but the current process ID
10100 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
10101 pub fn r#set_debug_client_info(
10102 &self,
10103 mut payload: &NodeSetDebugClientInfoRequest,
10104 ) -> Result<(), fidl::Error> {
10105 BufferCollectionTokenGroupProxyInterface::r#set_debug_client_info(self, payload)
10106 }
10107
10108 /// Sysmem logs a warning if sysmem hasn't seen
10109 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
10110 /// within 5 seconds after creation of a new collection.
10111 ///
10112 /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
10114 /// take effect.
10115 ///
10116 /// In most cases the default works well.
10117 ///
10118 /// All table fields are currently required.
10119 ///
10120 /// + request `deadline` The time at which sysmem will start trying to log
10121 /// the warning, unless all constraints are with sysmem by then.
10122 pub fn r#set_debug_timeout_log_deadline(
10123 &self,
10124 mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
10125 ) -> Result<(), fidl::Error> {
10126 BufferCollectionTokenGroupProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
10127 }
10128
10129 /// This enables verbose logging for the buffer collection.
10130 ///
10131 /// Verbose logging includes constraints set via
10132 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
10133 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
10134 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
10135 /// the tree of `Node`(s).
10136 ///
10137 /// Normally sysmem prints only a single line complaint when aggregation
10138 /// fails, with just the specific detailed reason that aggregation failed,
10139 /// with little surrounding context. While this is often enough to diagnose
10140 /// a problem if only a small change was made and everything was working
10141 /// before the small change, it's often not particularly helpful for getting
10142 /// a new buffer collection to work for the first time. Especially with
10143 /// more complex trees of nodes, involving things like
10144 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
10145 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
10146 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
10147 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
10148 /// looks like and why it's failing a logical allocation, or why a tree or
10149 /// subtree is failing sooner than expected.
10150 ///
10151 /// The intent of the extra logging is to be acceptable from a performance
10152 /// point of view, under the assumption that verbose logging is only enabled
10153 /// on a low number of buffer collections. If we're not tracking down a bug,
10154 /// we shouldn't send this message.
10155 pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
10156 BufferCollectionTokenGroupProxyInterface::r#set_verbose_logging(self)
10157 }
10158
10159 /// This gets a handle that can be used as a parameter to
10160 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
10161 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
10162 /// client obtained this handle from this `Node`.
10163 ///
10164 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
10165 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
10166 /// despite the two calls typically being on different channels.
10167 ///
10168 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
10169 ///
10170 /// All table fields are currently required.
10171 ///
10172 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
10173 /// different `Node` channel, to prove that the client obtained the handle
10174 /// from this `Node`.
10175 pub fn r#get_node_ref(
10176 &self,
10177 ) -> fidl::client::QueryResponseFut<
10178 NodeGetNodeRefResponse,
10179 fidl::encoding::DefaultFuchsiaResourceDialect,
10180 > {
10181 BufferCollectionTokenGroupProxyInterface::r#get_node_ref(self)
10182 }
10183
10184 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
10185 /// rooted at a different child token of a common parent
10186 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
10187 /// passed-in `node_ref`.
10188 ///
10189 /// This call is for assisting with admission control de-duplication, and
10190 /// with debugging.
10191 ///
10192 /// The `node_ref` must be obtained using
10193 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
10194 ///
10195 /// The `node_ref` can be a duplicated handle; it's not necessary to call
10196 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
10197 ///
10198 /// If a calling token may not actually be a valid token at all due to a
10199 /// potentially hostile/untrusted provider of the token, call
10200 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
10201 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
10202 /// never responds due to a calling token not being a real token (not really
10203 /// talking to sysmem). Another option is to call
10204 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
10205 /// which also validates the token along with converting it to a
10206 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
10207 ///
10208 /// All table fields are currently required.
10209 ///
10210 /// - response `is_alternate`
10211 /// - true: The first parent node in common between the calling node and
10212 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
10213 /// that the calling `Node` and the `node_ref` `Node` will not have both
10214 /// their constraints apply - rather sysmem will choose one or the other
10215 /// of the constraints - never both. This is because only one child of
10216 /// a `BufferCollectionTokenGroup` is selected during logical
10217 /// allocation, with only that one child's subtree contributing to
10218 /// constraints aggregation.
10219 /// - false: The first parent node in common between the calling `Node`
10220 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
10221 /// Currently, this means the first parent node in common is a
10222 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
10223 /// `Release`ed). This means that the calling `Node` and the `node_ref`
10224 /// `Node` may have both their constraints apply during constraints
10225 /// aggregation of the logical allocation, if both `Node`(s) are
10226 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
10227 /// this case, there is no `BufferCollectionTokenGroup` that will
10228 /// directly prevent the two `Node`(s) from both being selected and
10229 /// their constraints both aggregated, but even when false, one or both
10230 /// `Node`(s) may still be eliminated from consideration if one or both
10231 /// `Node`(s) has a direct or indirect parent
10232 /// `BufferCollectionTokenGroup` which selects a child subtree other
10233 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
10234 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
10235 /// associated with the same buffer collection as the calling `Node`.
10236 /// Another reason for this error is if the `node_ref` is an
10237 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
10238 /// a real `node_ref` obtained from `GetNodeRef`.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
10241 /// the needed rights expected on a real `node_ref`.
10242 /// * No other failing status codes are returned by this call. However,
10243 /// sysmem may add additional codes in future, so the client should have
10244 /// sensible default handling for any failing status code.
10245 pub fn r#is_alternate_for(
10246 &self,
10247 mut payload: NodeIsAlternateForRequest,
10248 ) -> fidl::client::QueryResponseFut<
10249 NodeIsAlternateForResult,
10250 fidl::encoding::DefaultFuchsiaResourceDialect,
10251 > {
10252 BufferCollectionTokenGroupProxyInterface::r#is_alternate_for(self, payload)
10253 }
10254
10255 /// Get the buffer collection ID. This ID is also available from
10256 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
10257 /// within the collection).
10258 ///
10259 /// This call is mainly useful in situations where we can't convey a
10260 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
10261 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
10262 /// handle, which can be joined back up with a `BufferCollection` client end
10263 /// that was created via a different path. Prefer to convey a
10264 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
10265 ///
10266 /// Trusting a `buffer_collection_id` value from a source other than sysmem
10267 /// is analogous to trusting a koid value from a source other than zircon.
10268 /// Both should be avoided unless really necessary, and both require
10269 /// caution. In some situations it may be reasonable to refer to a
10270 /// pre-established `BufferCollection` by `buffer_collection_id` via a
10271 /// protocol for efficiency reasons, but an incoming value purporting to be
10272 /// a `buffer_collection_id` is not sufficient alone to justify granting the
10273 /// sender of the `buffer_collection_id` any capability. The sender must
10274 /// first prove to a receiver that the sender has/had a VMO or has/had a
10275 /// `BufferCollectionToken` to the same collection by sending a handle that
10276 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
10277 /// `buffer_collection_id` value. The receiver should take care to avoid
10278 /// assuming that a sender had a `BufferCollectionToken` in cases where the
10279 /// sender has only proven that the sender had a VMO.
10280 ///
10281 /// - response `buffer_collection_id` This ID is unique per buffer
10282 /// collection per boot. Each buffer is uniquely identified by the
10283 /// `buffer_collection_id` and `buffer_index` together.
10284 pub fn r#get_buffer_collection_id(
10285 &self,
10286 ) -> fidl::client::QueryResponseFut<
10287 NodeGetBufferCollectionIdResponse,
10288 fidl::encoding::DefaultFuchsiaResourceDialect,
10289 > {
10290 BufferCollectionTokenGroupProxyInterface::r#get_buffer_collection_id(self)
10291 }
10292
10293 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
10294 /// created after this message to weak, which means that a client's `Node`
10295 /// client end (or a child created after this message) is not alone
10296 /// sufficient to keep allocated VMOs alive.
10297 ///
10298 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
10299 /// `close_weak_asap`.
10300 ///
10301 /// This message is only permitted before the `Node` becomes ready for
10302 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
10303 /// * `BufferCollectionToken`: any time
10304 /// * `BufferCollection`: before `SetConstraints`
10305 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
10306 ///
10307 /// Currently, no conversion from strong `Node` to weak `Node` after ready
10308 /// for allocation is provided, but a client can simulate that by creating
10309 /// an additional `Node` before allocation and setting that additional
10310 /// `Node` to weak, and then potentially at some point later sending
10311 /// `Release` and closing the client end of the client's strong `Node`, but
10312 /// keeping the client's weak `Node`.
10313 ///
10314 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
10315 /// collection failure (all `Node` client end(s) will see
10316 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
10317 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
10318 /// this situation until all `Node`(s) are ready for allocation. For initial
10319 /// allocation to succeed, at least one strong `Node` is required to exist
10320 /// at allocation time, but after that client receives VMO handles, that
10321 /// client can `BufferCollection.Release` and close the client end without
10322 /// causing this type of failure.
10323 ///
10324 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
10325 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
10326 /// separately as appropriate.
10327 pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
10328 BufferCollectionTokenGroupProxyInterface::r#set_weak(self)
10329 }
10330
10331 /// This indicates to sysmem that the client is prepared to pay attention to
10332 /// `close_weak_asap`.
10333 ///
10334 /// If sent, this message must be before
10335 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
10336 ///
10337 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
10338 /// send this message before `WaitForAllBuffersAllocated`, or a parent
10339 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
10340 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
10341 /// trigger buffer collection failure.
10342 ///
10343 /// This message is necessary because weak sysmem VMOs have not always been
10344 /// a thing, so older clients are not aware of the need to pay attention to
10345 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
10346 /// sysmem weak VMO handles asap. By having this message and requiring
10347 /// participants to indicate their acceptance of this aspect of the overall
10348 /// protocol, we avoid situations where an older client is delivered a weak
10349 /// VMO without any way for sysmem to get that VMO to close quickly later
10350 /// (and on a per-buffer basis).
10351 ///
10352 /// A participant that doesn't handle `close_weak_asap` and also doesn't
10353 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
10354 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
10355 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
10356 /// same participant has a child/delegate which does retrieve VMOs, that
10357 /// child/delegate will need to send `SetWeakOk` before
10358 /// `WaitForAllBuffersAllocated`.
10359 ///
10360 /// + request `for_child_nodes_also` If present and true, this means direct
10361 /// child nodes of this node created after this message plus all
10362 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
10363 /// those nodes. Any child node of this node that was created before this
10364 /// message is not included. This setting is "sticky" in the sense that a
10365 /// subsequent `SetWeakOk` without this bool set to true does not reset
10366 /// the server-side bool. If this creates a problem for a participant, a
10367 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
10368 /// tokens instead, as appropriate. A participant should only set
10369 /// `for_child_nodes_also` true if the participant can really promise to
10370 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
10371 /// weak VMO handles held by participants holding the corresponding child
10372 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
10373 /// which are using sysmem(1) can be weak, despite the clients of those
10374 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
10375 /// direct way to find out about `close_weak_asap`. This only applies to
10376 /// descendents of this `Node` which are using sysmem(1), not to this
10377 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
10378 /// token, which will fail allocation unless an ancestor of this `Node`
10379 /// specified `for_child_nodes_also` true.
10380 pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
10381 BufferCollectionTokenGroupProxyInterface::r#set_weak_ok(self, payload)
10382 }
10383
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
10386 /// reservation by a different `Node` via
10387 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
10388 ///
10389 /// The `Node` buffer counts may not be released until the entire tree of
10390 /// `Node`(s) is closed or failed, because
10391 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
10392 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
10393 /// `Node` buffer counts remain reserved until the orphaned node is later
10394 /// cleaned up.
10395 ///
10396 /// If the `Node` exceeds a fairly large number of attached eventpair server
10397 /// ends, a log message will indicate this and the `Node` (and the
10398 /// appropriate) sub-tree will fail.
10399 ///
10400 /// The `server_end` will remain open when
10401 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
10402 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
10403 /// [`fuchsia.sysmem2/BufferCollection`].
10404 ///
10405 /// This message can also be used with a
10406 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
10407 pub fn r#attach_node_tracking(
10408 &self,
10409 mut payload: NodeAttachNodeTrackingRequest,
10410 ) -> Result<(), fidl::Error> {
10411 BufferCollectionTokenGroupProxyInterface::r#attach_node_tracking(self, payload)
10412 }
10413
10414 /// Create a child [`fuchsia.sysmem2/BufferCollectionToken`]. Only one child
10415 /// (including its children) will be selected during allocation (or logical
10416 /// allocation).
10417 ///
10418 /// Before passing the client end of this token to
10419 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], completion of
10420 /// [`fuchsia.sysmem2/Node.Sync`] after
10421 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] is required.
10422 /// Or the client can use
10423 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`] which
10424 /// essentially includes the `Sync`.
10425 ///
10426 /// Sending CreateChild after AllChildrenPresent is not permitted; this will
10427 /// fail the group's subtree and close the connection.
10428 ///
10429 /// After all children have been created, send AllChildrenPresent.
10430 ///
10431 /// + request `token_request` The server end of the new token channel.
10432 /// + request `rights_attenuation_mask` If ZX_RIGHT_SAME_RIGHTS, the created
10433 /// token allows the holder to get the same rights to buffers as the
10434 /// parent token (of the group) had. When the value isn't
    ///   ZX_RIGHT_SAME_RIGHTS, the value is interpreted as a bitmask with 0
    ///   bits ensuring those rights are attenuated, so 0xFFFFFFFF is a synonym
10437 /// for ZX_RIGHT_SAME_RIGHTS. The value 0 is not allowed and intentionally
10438 /// causes subtree failure.
10439 pub fn r#create_child(
10440 &self,
10441 mut payload: BufferCollectionTokenGroupCreateChildRequest,
10442 ) -> Result<(), fidl::Error> {
10443 BufferCollectionTokenGroupProxyInterface::r#create_child(self, payload)
10444 }
10445
10446 /// Create 1 or more child tokens at once, synchronously. In contrast to
10447 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`], no
10448 /// [`fuchsia.sysmem2/Node.Sync`] is required before passing the client end
10449 /// of a returned token to
10450 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`].
10451 ///
10452 /// The lower-index child tokens are higher priority (attempted sooner) than
10453 /// higher-index child tokens.
10454 ///
10455 /// As per all child tokens, successful aggregation will choose exactly one
10456 /// child among all created children (across all children created across
10457 /// potentially multiple calls to
10458 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] and
10459 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`]).
10460 ///
10461 /// The maximum permissible total number of children per group, and total
10462 /// number of nodes in an overall tree (from the root) are capped to limits
10463 /// which are not configurable via these protocols.
10464 ///
10465 /// Sending CreateChildrenSync after AllChildrenPresent is not permitted;
10466 /// this will fail the group's subtree and close the connection.
10467 ///
10468 /// After all children have been created, send AllChildrenPresent.
10469 ///
10470 /// + request `rights_attentuation_masks` The size of the
10471 /// `rights_attentuation_masks` determines the number of created child
10472 /// tokens. The value ZX_RIGHT_SAME_RIGHTS doesn't attenuate any rights.
10473 /// The value 0xFFFFFFFF is a synonym for ZX_RIGHT_SAME_RIGHTS. For any
10474 /// other value, each 0 bit in the mask attenuates that right.
10475 /// - response `tokens` The created child tokens.
10476 pub fn r#create_children_sync(
10477 &self,
10478 mut payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
10479 ) -> fidl::client::QueryResponseFut<
10480 BufferCollectionTokenGroupCreateChildrenSyncResponse,
10481 fidl::encoding::DefaultFuchsiaResourceDialect,
10482 > {
10483 BufferCollectionTokenGroupProxyInterface::r#create_children_sync(self, payload)
10484 }
10485
10486 /// Indicate that no more children will be created.
10487 ///
10488 /// After creating all children, the client should send
10489 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`] to
10490 /// inform sysmem that no more children will be created, so that sysmem can
10491 /// know when it's ok to start aggregating constraints.
10492 ///
10493 /// Sending CreateChild after AllChildrenPresent is not permitted; this will
10494 /// fail the group's subtree and close the connection.
10495 ///
10496 /// If [`fuchsia.sysmem2/Node.Release`] is to be sent, it should be sent
10497 /// after `AllChildrenPresent`, else failure of the group's subtree will be
10498 /// triggered. This is intentionally not analogous to how `Release` without
10499 /// prior [`fuchsia.sysmem2/BufferCollection.SetConstraints`] doesn't cause
10500 /// subtree failure.
10501 pub fn r#all_children_present(&self) -> Result<(), fidl::Error> {
10502 BufferCollectionTokenGroupProxyInterface::r#all_children_present(self)
10503 }
10504}
10505
10506impl BufferCollectionTokenGroupProxyInterface for BufferCollectionTokenGroupProxy {
    // Future type returned by `r#sync`.
    type SyncResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    /// Two-way `Sync`: sends an empty payload and resolves once the server
    /// replies with the (empty) response, confirming prior one-way messages
    /// on this channel have been received.
    fn r#sync(&self) -> Self::SyncResponseFut {
        // Decodes the flexible empty-struct response body; the ordinal here
        // must match the one used in the request below.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x11ac2555cf575b54,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("sync")?;
            Ok(_response)
        }
        // FLEXIBLE marks this as a flexible interaction (unknown-interaction tolerant).
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
10528
10529 fn r#release(&self) -> Result<(), fidl::Error> {
10530 self.client.send::<fidl::encoding::EmptyPayload>(
10531 (),
10532 0x6a5cae7d6d6e04c6,
10533 fidl::encoding::DynamicFlags::FLEXIBLE,
10534 )
10535 }
10536
10537 fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
10538 self.client.send::<NodeSetNameRequest>(
10539 payload,
10540 0xb41f1624f48c1e9,
10541 fidl::encoding::DynamicFlags::FLEXIBLE,
10542 )
10543 }
10544
10545 fn r#set_debug_client_info(
10546 &self,
10547 mut payload: &NodeSetDebugClientInfoRequest,
10548 ) -> Result<(), fidl::Error> {
10549 self.client.send::<NodeSetDebugClientInfoRequest>(
10550 payload,
10551 0x5cde8914608d99b1,
10552 fidl::encoding::DynamicFlags::FLEXIBLE,
10553 )
10554 }
10555
10556 fn r#set_debug_timeout_log_deadline(
10557 &self,
10558 mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
10559 ) -> Result<(), fidl::Error> {
10560 self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
10561 payload,
10562 0x716b0af13d5c0806,
10563 fidl::encoding::DynamicFlags::FLEXIBLE,
10564 )
10565 }
10566
10567 fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
10568 self.client.send::<fidl::encoding::EmptyPayload>(
10569 (),
10570 0x5209c77415b4dfad,
10571 fidl::encoding::DynamicFlags::FLEXIBLE,
10572 )
10573 }
10574
    // Future type returned by `r#get_node_ref`.
    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    /// Two-way `GetNodeRef`: sends an empty request and decodes a
    /// `NodeGetNodeRefResponse` from the flexible response envelope.
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
        // Decodes the response body; the ordinal must match the request below.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b3d0e51614df053,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("get_node_ref")?;
            Ok(_response)
        }
        // FLEXIBLE marks this as a flexible interaction (unknown-interaction tolerant).
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
10598
10599 type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
10600 NodeIsAlternateForResult,
10601 fidl::encoding::DefaultFuchsiaResourceDialect,
10602 >;
10603 fn r#is_alternate_for(
10604 &self,
10605 mut payload: NodeIsAlternateForRequest,
10606 ) -> Self::IsAlternateForResponseFut {
10607 fn _decode(
10608 mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
10609 ) -> Result<NodeIsAlternateForResult, fidl::Error> {
10610 let _response = fidl::client::decode_transaction_body::<
10611 fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
10612 fidl::encoding::DefaultFuchsiaResourceDialect,
10613 0x3a58e00157e0825,
10614 >(_buf?)?
10615 .into_result::<BufferCollectionTokenGroupMarker>("is_alternate_for")?;
10616 Ok(_response.map(|x| x))
10617 }
10618 self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
10619 &mut payload,
10620 0x3a58e00157e0825,
10621 fidl::encoding::DynamicFlags::FLEXIBLE,
10622 _decode,
10623 )
10624 }
10625
    // Future type returned by `r#get_buffer_collection_id`.
    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    /// Two-way `GetBufferCollectionId`: sends an empty request and decodes a
    /// `NodeGetBufferCollectionIdResponse` from the flexible envelope.
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
        // Decodes the response body; the ordinal must match the request below.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x77d19a494b78ba8c,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("get_buffer_collection_id")?;
            Ok(_response)
        }
        // FLEXIBLE marks this as a flexible interaction (unknown-interaction tolerant).
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            NodeGetBufferCollectionIdResponse,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
10652
10653 fn r#set_weak(&self) -> Result<(), fidl::Error> {
10654 self.client.send::<fidl::encoding::EmptyPayload>(
10655 (),
10656 0x22dd3ea514eeffe1,
10657 fidl::encoding::DynamicFlags::FLEXIBLE,
10658 )
10659 }
10660
10661 fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
10662 self.client.send::<NodeSetWeakOkRequest>(
10663 &mut payload,
10664 0x38a44fc4d7724be9,
10665 fidl::encoding::DynamicFlags::FLEXIBLE,
10666 )
10667 }
10668
10669 fn r#attach_node_tracking(
10670 &self,
10671 mut payload: NodeAttachNodeTrackingRequest,
10672 ) -> Result<(), fidl::Error> {
10673 self.client.send::<NodeAttachNodeTrackingRequest>(
10674 &mut payload,
10675 0x3f22f2a293d3cdac,
10676 fidl::encoding::DynamicFlags::FLEXIBLE,
10677 )
10678 }
10679
10680 fn r#create_child(
10681 &self,
10682 mut payload: BufferCollectionTokenGroupCreateChildRequest,
10683 ) -> Result<(), fidl::Error> {
10684 self.client.send::<BufferCollectionTokenGroupCreateChildRequest>(
10685 &mut payload,
10686 0x41a0075d419f30c5,
10687 fidl::encoding::DynamicFlags::FLEXIBLE,
10688 )
10689 }
10690
    // Future returned by `create_children_sync`; resolves to the response
    // table once the server replies.
    type CreateChildrenSyncResponseFut = fidl::client::QueryResponseFut<
        BufferCollectionTokenGroupCreateChildrenSyncResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way flexible call with ordinal 0x15dea448c536070a
    // (CreateChildrenSync). Note the request is passed by shared reference,
    // unlike the by-value resource-carrying requests above — presumably
    // because this request table carries no handles.
    fn r#create_children_sync(
        &self,
        mut payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
    ) -> Self::CreateChildrenSyncResponseFut {
        // Decodes the reply body, applying flexible (unknown-interaction)
        // handling via `into_result` before returning the response table.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<BufferCollectionTokenGroupCreateChildrenSyncResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<BufferCollectionTokenGroupCreateChildrenSyncResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x15dea448c536070a,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("create_children_sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            BufferCollectionTokenGroupCreateChildrenSyncRequest,
            BufferCollectionTokenGroupCreateChildrenSyncResponse,
        >(
            payload,
            0x15dea448c536070a,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
10720
10721 fn r#all_children_present(&self) -> Result<(), fidl::Error> {
10722 self.client.send::<fidl::encoding::EmptyPayload>(
10723 (),
10724 0x5c327e4a23391312,
10725 fidl::encoding::DynamicFlags::FLEXIBLE,
10726 )
10727 }
10728}
10729
/// A stream of events arriving on a `BufferCollectionTokenGroup` channel
/// (the server-to-client direction of the protocol).
pub struct BufferCollectionTokenGroupEventStream {
    /// Receives raw message buffers from the channel; the `Stream` impl
    /// decodes each buffer into a `BufferCollectionTokenGroupEvent`.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
10733
// Marker impl: the event stream may be moved freely after being pinned.
impl std::marker::Unpin for BufferCollectionTokenGroupEventStream {}
10735
// `FusedStream` support: lets combinators check for completion without
// polling. Delegates to the underlying event receiver's terminated state.
impl futures::stream::FusedStream for BufferCollectionTokenGroupEventStream {
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
10741
10742impl futures::Stream for BufferCollectionTokenGroupEventStream {
10743 type Item = Result<BufferCollectionTokenGroupEvent, fidl::Error>;
10744
10745 fn poll_next(
10746 mut self: std::pin::Pin<&mut Self>,
10747 cx: &mut std::task::Context<'_>,
10748 ) -> std::task::Poll<Option<Self::Item>> {
10749 match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
10750 &mut self.event_receiver,
10751 cx
10752 )?) {
10753 Some(buf) => std::task::Poll::Ready(Some(BufferCollectionTokenGroupEvent::decode(buf))),
10754 None => std::task::Poll::Ready(None),
10755 }
10756 }
10757}
10758
/// An event received from a `BufferCollectionTokenGroup` server. These
/// bindings recognize no specific events for this protocol, so the only
/// variant is the catch-all for flexible events of unknown ordinal.
#[derive(Debug)]
pub enum BufferCollectionTokenGroupEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
10767
10768impl BufferCollectionTokenGroupEvent {
10769 /// Decodes a message buffer as a [`BufferCollectionTokenGroupEvent`].
10770 fn decode(
10771 mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
10772 ) -> Result<BufferCollectionTokenGroupEvent, fidl::Error> {
10773 let (bytes, _handles) = buf.split_mut();
10774 let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
10775 debug_assert_eq!(tx_header.tx_id, 0);
10776 match tx_header.ordinal {
10777 _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
10778 Ok(BufferCollectionTokenGroupEvent::_UnknownEvent {
10779 ordinal: tx_header.ordinal,
10780 })
10781 }
10782 _ => Err(fidl::Error::UnknownOrdinal {
10783 ordinal: tx_header.ordinal,
10784 protocol_name: <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
10785 })
10786 }
10787 }
10788}
10789
/// A Stream of incoming requests for fuchsia.sysmem2/BufferCollectionTokenGroup.
pub struct BufferCollectionTokenGroupRequestStream {
    /// Shared server state (channel + shutdown bookkeeping); also shared with
    /// control handles created from this stream.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    /// Set once the stream has yielded `None`; polling after that panics.
    is_terminated: bool,
}
10795
// Marker impl: the request stream may be moved freely after being pinned.
impl std::marker::Unpin for BufferCollectionTokenGroupRequestStream {}
10797
// `FusedStream` support: reports whether the request stream has already
// ended, so combinators can avoid polling a finished stream.
impl futures::stream::FusedStream for BufferCollectionTokenGroupRequestStream {
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
10803
10804impl fidl::endpoints::RequestStream for BufferCollectionTokenGroupRequestStream {
10805 type Protocol = BufferCollectionTokenGroupMarker;
10806 type ControlHandle = BufferCollectionTokenGroupControlHandle;
10807
10808 fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
10809 Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
10810 }
10811
10812 fn control_handle(&self) -> Self::ControlHandle {
10813 BufferCollectionTokenGroupControlHandle { inner: self.inner.clone() }
10814 }
10815
10816 fn into_inner(
10817 self,
10818 ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
10819 {
10820 (self.inner, self.is_terminated)
10821 }
10822
10823 fn from_inner(
10824 inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
10825 is_terminated: bool,
10826 ) -> Self {
10827 Self { inner, is_terminated }
10828 }
10829}
10830
impl futures::Stream for BufferCollectionTokenGroupRequestStream {
    type Item = Result<BufferCollectionTokenGroupRequest, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // Shutdown requested via a control handle: end the stream cleanly.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        // Polling after the stream yielded `None` is a caller bug.
        if this.is_terminated {
            panic!("polled BufferCollectionTokenGroupRequestStream after completion");
        }
        // Use thread-local scratch buffers for the channel read + decode.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closure ends the stream without an error item.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))));
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                // Dispatch on the method ordinal. Each arm validates the
                // tx-id kind (one-way vs two-way), decodes the request
                // payload, and wraps it — plus a responder for two-way
                // methods — in the matching request-enum variant.
                std::task::Poll::Ready(Some(match header.ordinal {
                    // Sync (two-way, empty payload).
                    0x11ac2555cf575b54 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::Sync {
                            responder: BufferCollectionTokenGroupSyncResponder {
                                // ManuallyDrop: the responder's Drop handles
                                // shutdown-on-unresponded semantics.
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Release (one-way, empty payload).
                    0x6a5cae7d6d6e04c6 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::Release {
                            control_handle,
                        })
                    }
                    // SetName (one-way).
                    0xb41f1624f48c1e9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetNameRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::SetName {payload: req,
                            control_handle,
                        })
                    }
                    // SetDebugClientInfo (one-way).
                    0x5cde8914608d99b1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetDebugClientInfoRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::SetDebugClientInfo {payload: req,
                            control_handle,
                        })
                    }
                    // SetDebugTimeoutLogDeadline (one-way).
                    0x716b0af13d5c0806 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetDebugTimeoutLogDeadlineRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::SetDebugTimeoutLogDeadline {payload: req,
                            control_handle,
                        })
                    }
                    // SetVerboseLogging (one-way, empty payload).
                    0x5209c77415b4dfad => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::SetVerboseLogging {
                            control_handle,
                        })
                    }
                    // GetNodeRef (two-way, empty payload).
                    0x5b3d0e51614df053 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::GetNodeRef {
                            responder: BufferCollectionTokenGroupGetNodeRefResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // IsAlternateFor (two-way).
                    0x3a58e00157e0825 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(NodeIsAlternateForRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::IsAlternateFor {payload: req,
                            responder: BufferCollectionTokenGroupIsAlternateForResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // GetBufferCollectionId (two-way, empty payload).
                    0x77d19a494b78ba8c => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::GetBufferCollectionId {
                            responder: BufferCollectionTokenGroupGetBufferCollectionIdResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // SetWeak (one-way, empty payload).
                    0x22dd3ea514eeffe1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::SetWeak {
                            control_handle,
                        })
                    }
                    // SetWeakOk (one-way).
                    0x38a44fc4d7724be9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetWeakOkRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::SetWeakOk {payload: req,
                            control_handle,
                        })
                    }
                    // AttachNodeTracking (one-way).
                    0x3f22f2a293d3cdac => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeAttachNodeTrackingRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::AttachNodeTracking {payload: req,
                            control_handle,
                        })
                    }
                    // CreateChild (one-way).
                    0x41a0075d419f30c5 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(BufferCollectionTokenGroupCreateChildRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenGroupCreateChildRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::CreateChild {payload: req,
                            control_handle,
                        })
                    }
                    // CreateChildrenSync (two-way).
                    0x15dea448c536070a => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(BufferCollectionTokenGroupCreateChildrenSyncRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenGroupCreateChildrenSyncRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::CreateChildrenSync {payload: req,
                            responder: BufferCollectionTokenGroupCreateChildrenSyncResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // AllChildrenPresent (one-way, empty payload).
                    0x5c327e4a23391312 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::AllChildrenPresent {
                            control_handle,
                        })
                    }
                    // Unknown flexible one-way method: surface as _UnknownMethod.
                    _ if header.tx_id == 0 && header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                        Ok(BufferCollectionTokenGroupRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: BufferCollectionTokenGroupControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::OneWay,
                        })
                    }
                    // Unknown flexible two-way method: reply with a framework
                    // error so the client unblocks, then surface as
                    // _UnknownMethod.
                    _ if header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                        this.inner.send_framework_err(
                            fidl::encoding::FrameworkErr::UnknownMethod,
                            header.tx_id,
                            header.ordinal,
                            header.dynamic_flags(),
                            (bytes, handles),
                        )?;
                        Ok(BufferCollectionTokenGroupRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: BufferCollectionTokenGroupControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::TwoWay,
                        })
                    }
                    // Unknown strict method: protocol error.
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name: <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
11076
11077/// The sysmem implementation is consistent with a logical / conceptual model of
11078/// allocation / logical allocation as follows:
11079///
11080/// As usual, a logical allocation considers either the root and all nodes with
11081/// connectivity to the root that don't transit a [`fuchsia.sysmem2/Node`]
11082/// created with [`fuchsia.sysmem2/BufferCollection.AttachToken`], or a subtree
11083/// rooted at an `AttachToken` `Node` and all `Node`(s) with connectivity to
11084/// that subtree that don't transit another `AttachToken`. This is called the
11085/// logical allocation pruned subtree, or pruned subtree for short.
11086///
11087/// During constraints aggregation, each
11088/// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] will select a single child
11089/// `Node` among its direct children. The rest of the children will appear to
11090/// fail the logical allocation, while the selected child may succeed.
11091///
11092/// When more than one `BufferCollectionTokenGroup` exists in the overall
11093/// logical allocation pruned subtree, the relative priority between two groups
11094/// is equivalent to their ordering in a DFS pre-order iteration of the tree,
11095/// with parents higher priority than children, and left children higher
11096/// priority than right children.
11097///
11098/// When a particular child of a group is selected (whether provisionally during
11099/// a constraints aggregation attempt, or as a final selection), the
11100/// non-selection of other children of the group will "hide" any other groups
11101/// under those non-selected children.
11102///
11103/// Within a logical allocation, aggregation is attempted first by provisionally
11104/// selecting child 0 of the highest-priority group, and child 0 of the next
11105/// highest-priority group that isn't hidden by the provisional selections so
11106/// far, etc.
11107///
11108/// If that aggregation attempt fails, aggregation will be attempted with the
11109/// ordinal 0 child of all the same groups except the lowest priority non-hidden
11110/// group which will provisionally select its ordinal 1 child (and then child 2
11111/// and so on). If a new lowest-priority group is un-hidden as provisional
11112/// selections are updated, that newly un-hidden lowest-priority group has all
11113/// its children considered in order, before changing the provisional selection
11114/// in the former lowest-priority group. In terms of result, this is equivalent
11115/// to systematic enumeration of all possible combinations of choices in a
11116/// counting-like order updating the lowest-priority group the most often and
11117/// the highest-priority group the least often. Rather than actually attempting
11118/// aggregation with all the combinations, we can skip over combinations which
11119/// are redundant/equivalent due to hiding without any change to the result.
11120///
11121/// Attempted constraint aggregations of enumerated non-equivalent combinations
11122/// of choices continue in this manner until either (a) all aggregation attempts
11123/// fail in which case the overall logical allocation fails, or (b) until an
11124/// attempted aggregation succeeds, in which case buffer allocation (if needed;
11125/// if this is the pruned subtree rooted at the overall root `Node`) is
11126/// attempted once. If buffer allocation based on the first successful
11127/// constraints aggregation fails, the overall logical allocation fails (there
11128/// is no buffer allocation retry / re-attempt). If buffer allocation succeeds
11129/// (or is not needed due to being a pruned subtree that doesn't include the
11130/// root), the logical allocation succeeds.
11131///
11132/// If this prioritization scheme cannot reasonably work for your usage of
11133/// sysmem, please don't hesitate to contact sysmem folks to discuss potentially
11134/// adding a way to achieve what you need.
11135///
11136/// Please avoid creating a large number of `BufferCollectionTokenGroup`(s) per
11137/// logical allocation, especially with large number of children overall, and
11138/// especially in cases where aggregation may reasonably be expected to often
11139/// fail using ordinal 0 children and possibly with later children as well.
11140/// Sysmem mitigates potentially high time complexity of evaluating too many
11141/// child combinations/selections across too many groups by simply failing
11142/// logical allocation beyond a certain (fairly high, but not huge) max number
11143/// of considered group child combinations/selections. More advanced (and more
11144/// complicated) mitigation is not anticipated to be practically necessary or
11145/// worth the added complexity. Please contact sysmem folks if the max limit is
11146/// getting hit or if you anticipate it getting hit, to discuss potential
11147/// options.
11148///
11149/// Prefer to use multiple [`fuchsia.sysmem2/ImageFormatConstraints`] in a
11150/// single [`fuchsia.sysmem2/BufferCollectionConstraints`] when feasible (when a
11151/// participant just needs to express the ability to work with more than a
11152/// single [`fuchsia.images2/PixelFormat`], with sysmem choosing which
11153/// `PixelFormat` to use among those supported by all participants).
11154///
11155/// Similar to [`fuchsia.sysmem2/BufferCollectionToken`] and
11156/// [`fuchsia.sysmem2/BufferCollection`], closure of the
11157/// `BufferCollectionTokenGroup` channel without sending
11158/// [`fuchsia.sysmem2/Node.Release`] first will cause buffer collection failure
11159/// (or subtree failure if using
11160/// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
11161/// [`fuchsia.sysmem2/BufferCollection.AttachToken`] and the
11162/// `BufferCollectionTokenGroup` is part of a subtree under such a node that
11163/// doesn't propagate failure to its parent).
11164///
11165/// Epitaphs are not used in this protocol.
11166#[derive(Debug)]
11167pub enum BufferCollectionTokenGroupRequest {
11168 /// Ensure that previous messages have been received server side. This is
11169 /// particularly useful after previous messages that created new tokens,
11170 /// because a token must be known to the sysmem server before sending the
11171 /// token to another participant.
11172 ///
11173 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
11174 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
11175 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
11176 /// to mitigate the possibility of a hostile/fake
11177 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
11178 /// Another way is to pass the token to
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
11180 /// the token as part of exchanging it for a
11181 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
11182 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
11183 /// of stalling.
11184 ///
11185 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
11186 /// and then starting and completing a `Sync`, it's then safe to send the
11187 /// `BufferCollectionToken` client ends to other participants knowing the
11188 /// server will recognize the tokens when they're sent by the other
11189 /// participants to sysmem in a
11190 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
11191 /// efficient way to create tokens while avoiding unnecessary round trips.
11192 ///
11193 /// Other options include waiting for each
11194 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
11195 /// individually (using separate call to `Sync` after each), or calling
11196 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
11197 /// converted to a `BufferCollection` via
11198 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
11199 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
11200 /// the sync step and can create multiple tokens at once.
11201 Sync { responder: BufferCollectionTokenGroupSyncResponder },
11202 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
11203 ///
11204 /// Normally a participant will convert a `BufferCollectionToken` into a
11205 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
11206 /// `Release` via the token (and then close the channel immediately or
11207 /// shortly later in response to server closing the server end), which
11208 /// avoids causing buffer collection failure. Without a prior `Release`,
11209 /// closing the `BufferCollectionToken` client end will cause buffer
11210 /// collection failure.
11211 ///
11212 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
11213 ///
11214 /// By default the server handles unexpected closure of a
11215 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
11216 /// first) by failing the buffer collection. Partly this is to expedite
11217 /// closing VMO handles to reclaim memory when any participant fails. If a
11218 /// participant would like to cleanly close a `BufferCollection` without
11219 /// causing buffer collection failure, the participant can send `Release`
11220 /// before closing the `BufferCollection` client end. The `Release` can
11221 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
11222 /// buffer collection won't require constraints from this node in order to
11223 /// allocate. If after `SetConstraints`, the constraints are retained and
11224 /// aggregated, despite the lack of `BufferCollection` connection at the
11225 /// time of constraints aggregation.
11226 ///
11227 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
11228 ///
11229 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
11230 /// end (without `Release` first) will trigger failure of the buffer
11231 /// collection. To close a `BufferCollectionTokenGroup` channel without
11232 /// failing the buffer collection, ensure that AllChildrenPresent() has been
11233 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
11234 /// client end.
11235 ///
11236 /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
11238 /// buffer collection will fail (triggered by reception of `Release` without
11239 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
11240 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
11241 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
11242 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
11243 /// close requires `AllChildrenPresent` (if not already sent), then
11244 /// `Release`, then close client end.
11245 ///
11246 /// If `Release` occurs after `AllChildrenPresent`, the children and all
11247 /// their constraints remain intact (just as they would if the
11248 /// `BufferCollectionTokenGroup` channel had remained open), and the client
11249 /// end close doesn't trigger buffer collection failure.
11250 ///
11251 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
11252 ///
11253 /// For brevity, the per-channel-protocol paragraphs above ignore the
11254 /// separate failure domain created by
11255 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
11256 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
11257 /// unexpectedly closes (without `Release` first) and that client end is
11258 /// under a failure domain, instead of failing the whole buffer collection,
11259 /// the failure domain is failed, but the buffer collection itself is
11260 /// isolated from failure of the failure domain. Such failure domains can be
11261 /// nested, in which case only the inner-most failure domain in which the
11262 /// `Node` resides fails.
11263 Release { control_handle: BufferCollectionTokenGroupControlHandle },
11264 /// Set a name for VMOs in this buffer collection.
11265 ///
11266 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
11267 /// will be truncated to fit. The name of the vmo will be suffixed with the
11268 /// buffer index within the collection (if the suffix fits within
11269 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
11270 /// listed in the inspect data.
11271 ///
11272 /// The name only affects VMOs allocated after the name is set; this call
11273 /// does not rename existing VMOs. If multiple clients set different names
11274 /// then the larger priority value will win. Setting a new name with the
11275 /// same priority as a prior name doesn't change the name.
11276 ///
11277 /// All table fields are currently required.
11278 ///
11279 /// + request `priority` The name is only set if this is the first `SetName`
11280 /// or if `priority` is greater than any previous `priority` value in
11281 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
11282 /// + request `name` The name for VMOs created under this buffer collection.
11283 SetName { payload: NodeSetNameRequest, control_handle: BufferCollectionTokenGroupControlHandle },
11284 /// Set information about the current client that can be used by sysmem to
11285 /// help diagnose leaking memory and allocation stalls waiting for a
11286 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
11287 ///
11288 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
11290 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
11291 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
11292 ///
11293 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
11294 /// `Allocator` is the most efficient way to ensure that all
11295 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
11296 /// set, and is also more efficient than separately sending the same debug
11297 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
11298 /// created [`fuchsia.sysmem2/Node`].
11299 ///
11300 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
11301 /// indicate which client is closing their channel first, leading to subtree
11302 /// failure (which can be normal if the purpose of the subtree is over, but
11303 /// if happening earlier than expected, the client-channel-specific name can
11304 /// help diagnose where the failure is first coming from, from sysmem's
11305 /// point of view).
11306 ///
11307 /// All table fields are currently required.
11308 ///
11309 /// + request `name` This can be an arbitrary string, but the current
11310 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
11311 /// + request `id` This can be an arbitrary id, but the current process ID
11312 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
11313 SetDebugClientInfo {
11314 payload: NodeSetDebugClientInfoRequest,
11315 control_handle: BufferCollectionTokenGroupControlHandle,
11316 },
11317 /// Sysmem logs a warning if sysmem hasn't seen
11318 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
11319 /// within 5 seconds after creation of a new collection.
11320 ///
11321 /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
11323 /// take effect.
11324 ///
11325 /// In most cases the default works well.
11326 ///
11327 /// All table fields are currently required.
11328 ///
11329 /// + request `deadline` The time at which sysmem will start trying to log
11330 /// the warning, unless all constraints are with sysmem by then.
11331 SetDebugTimeoutLogDeadline {
11332 payload: NodeSetDebugTimeoutLogDeadlineRequest,
11333 control_handle: BufferCollectionTokenGroupControlHandle,
11334 },
11335 /// This enables verbose logging for the buffer collection.
11336 ///
11337 /// Verbose logging includes constraints set via
11338 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
11339 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
11340 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
11341 /// the tree of `Node`(s).
11342 ///
11343 /// Normally sysmem prints only a single line complaint when aggregation
11344 /// fails, with just the specific detailed reason that aggregation failed,
11345 /// with little surrounding context. While this is often enough to diagnose
11346 /// a problem if only a small change was made and everything was working
11347 /// before the small change, it's often not particularly helpful for getting
11348 /// a new buffer collection to work for the first time. Especially with
11349 /// more complex trees of nodes, involving things like
11350 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
11351 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
11352 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
11353 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
11354 /// looks like and why it's failing a logical allocation, or why a tree or
11355 /// subtree is failing sooner than expected.
11356 ///
11357 /// The intent of the extra logging is to be acceptable from a performance
11358 /// point of view, under the assumption that verbose logging is only enabled
11359 /// on a low number of buffer collections. If we're not tracking down a bug,
11360 /// we shouldn't send this message.
11361 SetVerboseLogging { control_handle: BufferCollectionTokenGroupControlHandle },
11362 /// This gets a handle that can be used as a parameter to
11363 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
11364 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
11365 /// client obtained this handle from this `Node`.
11366 ///
11367 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
11368 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
11369 /// despite the two calls typically being on different channels.
11370 ///
11371 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
11372 ///
11373 /// All table fields are currently required.
11374 ///
11375 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
11376 /// different `Node` channel, to prove that the client obtained the handle
11377 /// from this `Node`.
11378 GetNodeRef { responder: BufferCollectionTokenGroupGetNodeRefResponder },
11379 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
11380 /// rooted at a different child token of a common parent
11381 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
11382 /// passed-in `node_ref`.
11383 ///
11384 /// This call is for assisting with admission control de-duplication, and
11385 /// with debugging.
11386 ///
11387 /// The `node_ref` must be obtained using
11388 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
11389 ///
11390 /// The `node_ref` can be a duplicated handle; it's not necessary to call
11391 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
11392 ///
11393 /// If a calling token may not actually be a valid token at all due to a
11394 /// potentially hostile/untrusted provider of the token, call
11395 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
11396 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
11397 /// never responds due to a calling token not being a real token (not really
11398 /// talking to sysmem). Another option is to call
11399 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
11400 /// which also validates the token along with converting it to a
11401 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
11402 ///
11403 /// All table fields are currently required.
11404 ///
11405 /// - response `is_alternate`
11406 /// - true: The first parent node in common between the calling node and
11407 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
11408 /// that the calling `Node` and the `node_ref` `Node` will not have both
11409 /// their constraints apply - rather sysmem will choose one or the other
11410 /// of the constraints - never both. This is because only one child of
11411 /// a `BufferCollectionTokenGroup` is selected during logical
11412 /// allocation, with only that one child's subtree contributing to
11413 /// constraints aggregation.
11414 /// - false: The first parent node in common between the calling `Node`
11415 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
11416 /// Currently, this means the first parent node in common is a
11417 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
11418 /// `Release`ed). This means that the calling `Node` and the `node_ref`
11419 /// `Node` may have both their constraints apply during constraints
11420 /// aggregation of the logical allocation, if both `Node`(s) are
11421 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
11422 /// this case, there is no `BufferCollectionTokenGroup` that will
11423 /// directly prevent the two `Node`(s) from both being selected and
11424 /// their constraints both aggregated, but even when false, one or both
11425 /// `Node`(s) may still be eliminated from consideration if one or both
11426 /// `Node`(s) has a direct or indirect parent
11427 /// `BufferCollectionTokenGroup` which selects a child subtree other
11428 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
11429 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
11430 /// associated with the same buffer collection as the calling `Node`.
11431 /// Another reason for this error is if the `node_ref` is an
11432 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
11433 /// a real `node_ref` obtained from `GetNodeRef`.
11434 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    /// `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
11436 /// the needed rights expected on a real `node_ref`.
11437 /// * No other failing status codes are returned by this call. However,
11438 /// sysmem may add additional codes in future, so the client should have
11439 /// sensible default handling for any failing status code.
11440 IsAlternateFor {
11441 payload: NodeIsAlternateForRequest,
11442 responder: BufferCollectionTokenGroupIsAlternateForResponder,
11443 },
11444 /// Get the buffer collection ID. This ID is also available from
11445 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
11446 /// within the collection).
11447 ///
11448 /// This call is mainly useful in situations where we can't convey a
11449 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
11450 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
11451 /// handle, which can be joined back up with a `BufferCollection` client end
11452 /// that was created via a different path. Prefer to convey a
11453 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
11454 ///
11455 /// Trusting a `buffer_collection_id` value from a source other than sysmem
11456 /// is analogous to trusting a koid value from a source other than zircon.
11457 /// Both should be avoided unless really necessary, and both require
11458 /// caution. In some situations it may be reasonable to refer to a
11459 /// pre-established `BufferCollection` by `buffer_collection_id` via a
11460 /// protocol for efficiency reasons, but an incoming value purporting to be
11461 /// a `buffer_collection_id` is not sufficient alone to justify granting the
11462 /// sender of the `buffer_collection_id` any capability. The sender must
11463 /// first prove to a receiver that the sender has/had a VMO or has/had a
11464 /// `BufferCollectionToken` to the same collection by sending a handle that
11465 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
11466 /// `buffer_collection_id` value. The receiver should take care to avoid
11467 /// assuming that a sender had a `BufferCollectionToken` in cases where the
11468 /// sender has only proven that the sender had a VMO.
11469 ///
11470 /// - response `buffer_collection_id` This ID is unique per buffer
11471 /// collection per boot. Each buffer is uniquely identified by the
11472 /// `buffer_collection_id` and `buffer_index` together.
11473 GetBufferCollectionId { responder: BufferCollectionTokenGroupGetBufferCollectionIdResponder },
11474 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
11475 /// created after this message to weak, which means that a client's `Node`
11476 /// client end (or a child created after this message) is not alone
11477 /// sufficient to keep allocated VMOs alive.
11478 ///
11479 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
11480 /// `close_weak_asap`.
11481 ///
11482 /// This message is only permitted before the `Node` becomes ready for
11483 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
11484 /// * `BufferCollectionToken`: any time
11485 /// * `BufferCollection`: before `SetConstraints`
11486 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
11487 ///
11488 /// Currently, no conversion from strong `Node` to weak `Node` after ready
11489 /// for allocation is provided, but a client can simulate that by creating
11490 /// an additional `Node` before allocation and setting that additional
11491 /// `Node` to weak, and then potentially at some point later sending
11492 /// `Release` and closing the client end of the client's strong `Node`, but
11493 /// keeping the client's weak `Node`.
11494 ///
11495 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
11496 /// collection failure (all `Node` client end(s) will see
11497 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
11498 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
11499 /// this situation until all `Node`(s) are ready for allocation. For initial
11500 /// allocation to succeed, at least one strong `Node` is required to exist
11501 /// at allocation time, but after that client receives VMO handles, that
11502 /// client can `BufferCollection.Release` and close the client end without
11503 /// causing this type of failure.
11504 ///
11505 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
11506 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
11507 /// separately as appropriate.
11508 SetWeak { control_handle: BufferCollectionTokenGroupControlHandle },
11509 /// This indicates to sysmem that the client is prepared to pay attention to
11510 /// `close_weak_asap`.
11511 ///
11512 /// If sent, this message must be before
11513 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
11514 ///
11515 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
11516 /// send this message before `WaitForAllBuffersAllocated`, or a parent
11517 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
11518 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
11519 /// trigger buffer collection failure.
11520 ///
11521 /// This message is necessary because weak sysmem VMOs have not always been
11522 /// a thing, so older clients are not aware of the need to pay attention to
11523 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
11524 /// sysmem weak VMO handles asap. By having this message and requiring
11525 /// participants to indicate their acceptance of this aspect of the overall
11526 /// protocol, we avoid situations where an older client is delivered a weak
11527 /// VMO without any way for sysmem to get that VMO to close quickly later
11528 /// (and on a per-buffer basis).
11529 ///
11530 /// A participant that doesn't handle `close_weak_asap` and also doesn't
11531 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
11532 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
11533 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
11534 /// same participant has a child/delegate which does retrieve VMOs, that
11535 /// child/delegate will need to send `SetWeakOk` before
11536 /// `WaitForAllBuffersAllocated`.
11537 ///
11538 /// + request `for_child_nodes_also` If present and true, this means direct
11539 /// child nodes of this node created after this message plus all
11540 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
11541 /// those nodes. Any child node of this node that was created before this
11542 /// message is not included. This setting is "sticky" in the sense that a
11543 /// subsequent `SetWeakOk` without this bool set to true does not reset
11544 /// the server-side bool. If this creates a problem for a participant, a
11545 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
11546 /// tokens instead, as appropriate. A participant should only set
11547 /// `for_child_nodes_also` true if the participant can really promise to
11548 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
11549 /// weak VMO handles held by participants holding the corresponding child
    /// `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
11551 /// which are using sysmem(1) can be weak, despite the clients of those
11552 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
11553 /// direct way to find out about `close_weak_asap`. This only applies to
    /// descendants of this `Node` which are using sysmem(1), not to this
11555 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
11556 /// token, which will fail allocation unless an ancestor of this `Node`
11557 /// specified `for_child_nodes_also` true.
11558 SetWeakOk {
11559 payload: NodeSetWeakOkRequest,
11560 control_handle: BufferCollectionTokenGroupControlHandle,
11561 },
11562 /// The server_end will be closed after this `Node` and any child nodes have
    /// released their buffer counts, making those counts available for
11564 /// reservation by a different `Node` via
11565 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
11566 ///
11567 /// The `Node` buffer counts may not be released until the entire tree of
11568 /// `Node`(s) is closed or failed, because
11569 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
11570 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
11571 /// `Node` buffer counts remain reserved until the orphaned node is later
11572 /// cleaned up.
11573 ///
11574 /// If the `Node` exceeds a fairly large number of attached eventpair server
11575 /// ends, a log message will indicate this and the `Node` (and the
11576 /// appropriate) sub-tree will fail.
11577 ///
11578 /// The `server_end` will remain open when
11579 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
11580 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
11581 /// [`fuchsia.sysmem2/BufferCollection`].
11582 ///
11583 /// This message can also be used with a
11584 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
11585 AttachNodeTracking {
11586 payload: NodeAttachNodeTrackingRequest,
11587 control_handle: BufferCollectionTokenGroupControlHandle,
11588 },
11589 /// Create a child [`fuchsia.sysmem2/BufferCollectionToken`]. Only one child
11590 /// (including its children) will be selected during allocation (or logical
11591 /// allocation).
11592 ///
11593 /// Before passing the client end of this token to
11594 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], completion of
11595 /// [`fuchsia.sysmem2/Node.Sync`] after
11596 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] is required.
11597 /// Or the client can use
11598 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`] which
11599 /// essentially includes the `Sync`.
11600 ///
11601 /// Sending CreateChild after AllChildrenPresent is not permitted; this will
11602 /// fail the group's subtree and close the connection.
11603 ///
11604 /// After all children have been created, send AllChildrenPresent.
11605 ///
11606 /// + request `token_request` The server end of the new token channel.
11607 /// + request `rights_attenuation_mask` If ZX_RIGHT_SAME_RIGHTS, the created
11608 /// token allows the holder to get the same rights to buffers as the
11609 /// parent token (of the group) had. When the value isn't
    /// ZX_RIGHT_SAME_RIGHTS, the value is interpreted as a bitmask with 0
    /// bits ensuring those rights are attenuated, so 0xFFFFFFFF is a synonym
11612 /// for ZX_RIGHT_SAME_RIGHTS. The value 0 is not allowed and intentionally
11613 /// causes subtree failure.
11614 CreateChild {
11615 payload: BufferCollectionTokenGroupCreateChildRequest,
11616 control_handle: BufferCollectionTokenGroupControlHandle,
11617 },
11618 /// Create 1 or more child tokens at once, synchronously. In contrast to
11619 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`], no
11620 /// [`fuchsia.sysmem2/Node.Sync`] is required before passing the client end
11621 /// of a returned token to
11622 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`].
11623 ///
11624 /// The lower-index child tokens are higher priority (attempted sooner) than
11625 /// higher-index child tokens.
11626 ///
11627 /// As per all child tokens, successful aggregation will choose exactly one
11628 /// child among all created children (across all children created across
11629 /// potentially multiple calls to
11630 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] and
11631 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`]).
11632 ///
11633 /// The maximum permissible total number of children per group, and total
11634 /// number of nodes in an overall tree (from the root) are capped to limits
11635 /// which are not configurable via these protocols.
11636 ///
11637 /// Sending CreateChildrenSync after AllChildrenPresent is not permitted;
11638 /// this will fail the group's subtree and close the connection.
11639 ///
11640 /// After all children have been created, send AllChildrenPresent.
11641 ///
11642 /// + request `rights_attentuation_masks` The size of the
11643 /// `rights_attentuation_masks` determines the number of created child
11644 /// tokens. The value ZX_RIGHT_SAME_RIGHTS doesn't attenuate any rights.
11645 /// The value 0xFFFFFFFF is a synonym for ZX_RIGHT_SAME_RIGHTS. For any
11646 /// other value, each 0 bit in the mask attenuates that right.
11647 /// - response `tokens` The created child tokens.
11648 CreateChildrenSync {
11649 payload: BufferCollectionTokenGroupCreateChildrenSyncRequest,
11650 responder: BufferCollectionTokenGroupCreateChildrenSyncResponder,
11651 },
11652 /// Indicate that no more children will be created.
11653 ///
11654 /// After creating all children, the client should send
11655 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`] to
11656 /// inform sysmem that no more children will be created, so that sysmem can
11657 /// know when it's ok to start aggregating constraints.
11658 ///
11659 /// Sending CreateChild after AllChildrenPresent is not permitted; this will
11660 /// fail the group's subtree and close the connection.
11661 ///
11662 /// If [`fuchsia.sysmem2/Node.Release`] is to be sent, it should be sent
11663 /// after `AllChildrenPresent`, else failure of the group's subtree will be
11664 /// triggered. This is intentionally not analogous to how `Release` without
11665 /// prior [`fuchsia.sysmem2/BufferCollection.SetConstraints`] doesn't cause
11666 /// subtree failure.
11667 AllChildrenPresent { control_handle: BufferCollectionTokenGroupControlHandle },
11668 /// An interaction was received which does not match any known method.
11669 #[non_exhaustive]
11670 _UnknownMethod {
11671 /// Ordinal of the method that was called.
11672 ordinal: u64,
11673 control_handle: BufferCollectionTokenGroupControlHandle,
11674 method_type: fidl::MethodType,
11675 },
11676}
11677
11678impl BufferCollectionTokenGroupRequest {
11679 #[allow(irrefutable_let_patterns)]
11680 pub fn into_sync(self) -> Option<(BufferCollectionTokenGroupSyncResponder)> {
11681 if let BufferCollectionTokenGroupRequest::Sync { responder } = self {
11682 Some((responder))
11683 } else {
11684 None
11685 }
11686 }
11687
11688 #[allow(irrefutable_let_patterns)]
11689 pub fn into_release(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11690 if let BufferCollectionTokenGroupRequest::Release { control_handle } = self {
11691 Some((control_handle))
11692 } else {
11693 None
11694 }
11695 }
11696
11697 #[allow(irrefutable_let_patterns)]
11698 pub fn into_set_name(
11699 self,
11700 ) -> Option<(NodeSetNameRequest, BufferCollectionTokenGroupControlHandle)> {
11701 if let BufferCollectionTokenGroupRequest::SetName { payload, control_handle } = self {
11702 Some((payload, control_handle))
11703 } else {
11704 None
11705 }
11706 }
11707
11708 #[allow(irrefutable_let_patterns)]
11709 pub fn into_set_debug_client_info(
11710 self,
11711 ) -> Option<(NodeSetDebugClientInfoRequest, BufferCollectionTokenGroupControlHandle)> {
11712 if let BufferCollectionTokenGroupRequest::SetDebugClientInfo { payload, control_handle } =
11713 self
11714 {
11715 Some((payload, control_handle))
11716 } else {
11717 None
11718 }
11719 }
11720
11721 #[allow(irrefutable_let_patterns)]
11722 pub fn into_set_debug_timeout_log_deadline(
11723 self,
11724 ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, BufferCollectionTokenGroupControlHandle)>
11725 {
11726 if let BufferCollectionTokenGroupRequest::SetDebugTimeoutLogDeadline {
11727 payload,
11728 control_handle,
11729 } = self
11730 {
11731 Some((payload, control_handle))
11732 } else {
11733 None
11734 }
11735 }
11736
11737 #[allow(irrefutable_let_patterns)]
11738 pub fn into_set_verbose_logging(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11739 if let BufferCollectionTokenGroupRequest::SetVerboseLogging { control_handle } = self {
11740 Some((control_handle))
11741 } else {
11742 None
11743 }
11744 }
11745
11746 #[allow(irrefutable_let_patterns)]
11747 pub fn into_get_node_ref(self) -> Option<(BufferCollectionTokenGroupGetNodeRefResponder)> {
11748 if let BufferCollectionTokenGroupRequest::GetNodeRef { responder } = self {
11749 Some((responder))
11750 } else {
11751 None
11752 }
11753 }
11754
11755 #[allow(irrefutable_let_patterns)]
11756 pub fn into_is_alternate_for(
11757 self,
11758 ) -> Option<(NodeIsAlternateForRequest, BufferCollectionTokenGroupIsAlternateForResponder)>
11759 {
11760 if let BufferCollectionTokenGroupRequest::IsAlternateFor { payload, responder } = self {
11761 Some((payload, responder))
11762 } else {
11763 None
11764 }
11765 }
11766
11767 #[allow(irrefutable_let_patterns)]
11768 pub fn into_get_buffer_collection_id(
11769 self,
11770 ) -> Option<(BufferCollectionTokenGroupGetBufferCollectionIdResponder)> {
11771 if let BufferCollectionTokenGroupRequest::GetBufferCollectionId { responder } = self {
11772 Some((responder))
11773 } else {
11774 None
11775 }
11776 }
11777
11778 #[allow(irrefutable_let_patterns)]
11779 pub fn into_set_weak(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11780 if let BufferCollectionTokenGroupRequest::SetWeak { control_handle } = self {
11781 Some((control_handle))
11782 } else {
11783 None
11784 }
11785 }
11786
11787 #[allow(irrefutable_let_patterns)]
11788 pub fn into_set_weak_ok(
11789 self,
11790 ) -> Option<(NodeSetWeakOkRequest, BufferCollectionTokenGroupControlHandle)> {
11791 if let BufferCollectionTokenGroupRequest::SetWeakOk { payload, control_handle } = self {
11792 Some((payload, control_handle))
11793 } else {
11794 None
11795 }
11796 }
11797
11798 #[allow(irrefutable_let_patterns)]
11799 pub fn into_attach_node_tracking(
11800 self,
11801 ) -> Option<(NodeAttachNodeTrackingRequest, BufferCollectionTokenGroupControlHandle)> {
11802 if let BufferCollectionTokenGroupRequest::AttachNodeTracking { payload, control_handle } =
11803 self
11804 {
11805 Some((payload, control_handle))
11806 } else {
11807 None
11808 }
11809 }
11810
11811 #[allow(irrefutable_let_patterns)]
11812 pub fn into_create_child(
11813 self,
11814 ) -> Option<(
11815 BufferCollectionTokenGroupCreateChildRequest,
11816 BufferCollectionTokenGroupControlHandle,
11817 )> {
11818 if let BufferCollectionTokenGroupRequest::CreateChild { payload, control_handle } = self {
11819 Some((payload, control_handle))
11820 } else {
11821 None
11822 }
11823 }
11824
11825 #[allow(irrefutable_let_patterns)]
11826 pub fn into_create_children_sync(
11827 self,
11828 ) -> Option<(
11829 BufferCollectionTokenGroupCreateChildrenSyncRequest,
11830 BufferCollectionTokenGroupCreateChildrenSyncResponder,
11831 )> {
11832 if let BufferCollectionTokenGroupRequest::CreateChildrenSync { payload, responder } = self {
11833 Some((payload, responder))
11834 } else {
11835 None
11836 }
11837 }
11838
11839 #[allow(irrefutable_let_patterns)]
11840 pub fn into_all_children_present(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11841 if let BufferCollectionTokenGroupRequest::AllChildrenPresent { control_handle } = self {
11842 Some((control_handle))
11843 } else {
11844 None
11845 }
11846 }
11847
11848 /// Name of the method defined in FIDL
11849 pub fn method_name(&self) -> &'static str {
11850 match *self {
11851 BufferCollectionTokenGroupRequest::Sync { .. } => "sync",
11852 BufferCollectionTokenGroupRequest::Release { .. } => "release",
11853 BufferCollectionTokenGroupRequest::SetName { .. } => "set_name",
11854 BufferCollectionTokenGroupRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
11855 BufferCollectionTokenGroupRequest::SetDebugTimeoutLogDeadline { .. } => {
11856 "set_debug_timeout_log_deadline"
11857 }
11858 BufferCollectionTokenGroupRequest::SetVerboseLogging { .. } => "set_verbose_logging",
11859 BufferCollectionTokenGroupRequest::GetNodeRef { .. } => "get_node_ref",
11860 BufferCollectionTokenGroupRequest::IsAlternateFor { .. } => "is_alternate_for",
11861 BufferCollectionTokenGroupRequest::GetBufferCollectionId { .. } => {
11862 "get_buffer_collection_id"
11863 }
11864 BufferCollectionTokenGroupRequest::SetWeak { .. } => "set_weak",
11865 BufferCollectionTokenGroupRequest::SetWeakOk { .. } => "set_weak_ok",
11866 BufferCollectionTokenGroupRequest::AttachNodeTracking { .. } => "attach_node_tracking",
11867 BufferCollectionTokenGroupRequest::CreateChild { .. } => "create_child",
11868 BufferCollectionTokenGroupRequest::CreateChildrenSync { .. } => "create_children_sync",
11869 BufferCollectionTokenGroupRequest::AllChildrenPresent { .. } => "all_children_present",
11870 BufferCollectionTokenGroupRequest::_UnknownMethod {
11871 method_type: fidl::MethodType::OneWay,
11872 ..
11873 } => "unknown one-way method",
11874 BufferCollectionTokenGroupRequest::_UnknownMethod {
11875 method_type: fidl::MethodType::TwoWay,
11876 ..
11877 } => "unknown two-way method",
11878 }
11879 }
11880}
11881
/// Server-side control handle for the `BufferCollectionTokenGroup` protocol.
/// Cloneable: all clones share the same underlying serving state via `Arc`.
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenGroupControlHandle {
    // Shared serving state; the `ControlHandle` trait methods delegate to this.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
11886
/// All `ControlHandle` operations delegate to the shared `ServeInner` that
/// owns the underlying server channel.
impl fidl::endpoints::ControlHandle for BufferCollectionTokenGroupControlHandle {
    // Shuts down the serving loop / channel.
    fn shutdown(&self) {
        self.inner.shutdown()
    }

    // Shuts down the channel after sending `status` as an epitaph.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    // Reports whether the underlying channel has been closed.
    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    // Returns a future-like signal handle that resolves when the channel closes.
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Raising signals on the peer requires a real zircon channel, so this is
    // only available when compiled for Fuchsia.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
11913
// Intentionally empty: this protocol defines no events, so no inherent
// methods are generated on the control handle beyond the trait impl above.
impl BufferCollectionTokenGroupControlHandle {}
11915
/// Responder for the two-way `Sync` method; a reply must be sent (or the
/// responder explicitly dropped) or the channel is shut down on drop.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupSyncResponder {
    // Wrapped in `ManuallyDrop` so `drop_without_shutdown` can release it
    // without running this responder's `Drop` (which shuts down the channel).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    // Transaction id of the request this responder replies to.
    tx_id: u32,
}
11922
/// Sets the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
11933
impl fidl::endpoints::Responder for BufferCollectionTokenGroupSyncResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    /// Borrows the control handle for the channel this responder replies on.
    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and without shutting down the
    /// channel (unlike `Drop`, which shuts it down).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
11948
11949impl BufferCollectionTokenGroupSyncResponder {
11950 /// Sends a response to the FIDL transaction.
11951 ///
11952 /// Sets the channel to shutdown if an error occurs.
11953 pub fn send(self) -> Result<(), fidl::Error> {
11954 let _result = self.send_raw();
11955 if _result.is_err() {
11956 self.control_handle.shutdown();
11957 }
11958 self.drop_without_shutdown();
11959 _result
11960 }
11961
11962 /// Similar to "send" but does not shutdown the channel if an error occurs.
11963 pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
11964 let _result = self.send_raw();
11965 self.drop_without_shutdown();
11966 _result
11967 }
11968
11969 fn send_raw(&self) -> Result<(), fidl::Error> {
11970 self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
11971 fidl::encoding::Flexible::new(()),
11972 self.tx_id,
11973 0x11ac2555cf575b54,
11974 fidl::encoding::DynamicFlags::FLEXIBLE,
11975 )
11976 }
11977}
11978
/// Responder for the two-way `GetNodeRef` method; must be consumed to reply.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupGetNodeRefResponder {
    // ManuallyDrop lets `drop_without_shutdown` destroy the handle while
    // skipping this type's `Drop` impl (which shuts the channel down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    // Transaction id of the request; echoed back in the reply.
    tx_id: u32,
}
11985
/// Sets the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupGetNodeRefResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
11996
impl fidl::endpoints::Responder for BufferCollectionTokenGroupGetNodeRefResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    /// Borrows the control handle for the channel this responder replies on.
    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and without shutting down the
    /// channel (unlike `Drop`, which shuts it down).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
12011
impl BufferCollectionTokenGroupGetNodeRefResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut payload: NodeGetNodeRefResponse,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes `payload` as a flexible reply and writes it to the channel,
    /// echoing the request's transaction id alongside the method ordinal.
    fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
            fidl::encoding::Flexible::new(&mut payload),
            self.tx_id,
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
12044
/// Responder for the two-way `IsAlternateFor` method; must be consumed to reply.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupIsAlternateForResponder {
    // ManuallyDrop lets `drop_without_shutdown` destroy the handle while
    // skipping this type's `Drop` impl (which shuts the channel down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    // Transaction id of the request; echoed back in the reply.
    tx_id: u32,
}
12051
/// Sets the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupIsAlternateForResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
12062
impl fidl::endpoints::Responder for BufferCollectionTokenGroupIsAlternateForResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    /// Borrows the control handle for the channel this responder replies on.
    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and without shutting down the
    /// channel (unlike `Drop`, which shuts it down).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
12077
impl BufferCollectionTokenGroupIsAlternateForResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut result: Result<&NodeIsAlternateForResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut result: Result<&NodeIsAlternateForResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes `result` (success payload or domain error) as a flexible
    /// result reply, echoing the request's transaction id.
    fn send_raw(
        &self,
        mut result: Result<&NodeIsAlternateForResponse, Error>,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            NodeIsAlternateForResponse,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
12119
/// Responder for the two-way `GetBufferCollectionId` method; must be consumed to reply.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupGetBufferCollectionIdResponder {
    // ManuallyDrop lets `drop_without_shutdown` destroy the handle while
    // skipping this type's `Drop` impl (which shuts the channel down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    // Transaction id of the request; echoed back in the reply.
    tx_id: u32,
}
12126
/// Sets the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupGetBufferCollectionIdResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
12137
impl fidl::endpoints::Responder for BufferCollectionTokenGroupGetBufferCollectionIdResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    /// Borrows the control handle for the channel this responder replies on.
    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and without shutting down the
    /// channel (unlike `Drop`, which shuts it down).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
12152
12153impl BufferCollectionTokenGroupGetBufferCollectionIdResponder {
12154 /// Sends a response to the FIDL transaction.
12155 ///
12156 /// Sets the channel to shutdown if an error occurs.
12157 pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
12158 let _result = self.send_raw(payload);
12159 if _result.is_err() {
12160 self.control_handle.shutdown();
12161 }
12162 self.drop_without_shutdown();
12163 _result
12164 }
12165
12166 /// Similar to "send" but does not shutdown the channel if an error occurs.
12167 pub fn send_no_shutdown_on_err(
12168 self,
12169 mut payload: &NodeGetBufferCollectionIdResponse,
12170 ) -> Result<(), fidl::Error> {
12171 let _result = self.send_raw(payload);
12172 self.drop_without_shutdown();
12173 _result
12174 }
12175
12176 fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
12177 self.control_handle
12178 .inner
12179 .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
12180 fidl::encoding::Flexible::new(payload),
12181 self.tx_id,
12182 0x77d19a494b78ba8c,
12183 fidl::encoding::DynamicFlags::FLEXIBLE,
12184 )
12185 }
12186}
12187
/// Responder for the two-way `CreateChildrenSync` method; must be consumed to reply.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupCreateChildrenSyncResponder {
    // ManuallyDrop lets `drop_without_shutdown` destroy the handle while
    // skipping this type's `Drop` impl (which shuts the channel down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    // Transaction id of the request; echoed back in the reply.
    tx_id: u32,
}
12194
/// Sets the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupCreateChildrenSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
12205
impl fidl::endpoints::Responder for BufferCollectionTokenGroupCreateChildrenSyncResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    /// Borrows the control handle for the channel this responder replies on.
    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and without shutting down the
    /// channel (unlike `Drop`, which shuts it down).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
12220
impl BufferCollectionTokenGroupCreateChildrenSyncResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut payload: BufferCollectionTokenGroupCreateChildrenSyncResponse,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut payload: BufferCollectionTokenGroupCreateChildrenSyncResponse,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes `payload` as a flexible reply and writes it to the channel,
    /// echoing the request's transaction id alongside the method ordinal.
    fn send_raw(
        &self,
        mut payload: BufferCollectionTokenGroupCreateChildrenSyncResponse,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleType<
            BufferCollectionTokenGroupCreateChildrenSyncResponse,
        >>(
            fidl::encoding::Flexible::new(&mut payload),
            self.tx_id,
            0x15dea448c536070a,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
12261
/// Marker type identifying the `Node` protocol for endpoint/proxy plumbing.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct NodeMarker;

impl fidl::endpoints::ProtocolMarker for NodeMarker {
    type Proxy = NodeProxy;
    type RequestStream = NodeRequestStream;
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = NodeSynchronousProxy;

    // Name used in debug/error output for this protocol's channels.
    const DEBUG_NAME: &'static str = "(anonymous) Node";
}
/// Result of `Node.IsAlternateFor`: the success payload or a sysmem `Error`.
pub type NodeIsAlternateForResult = Result<NodeIsAlternateForResponse, Error>;
12274
/// Client-side interface for the `Node` protocol, implemented by the
/// generated proxies. One-way methods return `Result` immediately; two-way
/// methods return an associated future type that resolves to the response.
pub trait NodeProxyInterface: Send + Sync {
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
}
/// Synchronous (blocking) client for the `Node` protocol; Fuchsia-only.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct NodeSynchronousProxy {
    // Untyped synchronous FIDL client that owns the channel.
    client: fidl::client::sync::Client,
}
12313
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for NodeSynchronousProxy {
    type Proxy = NodeProxy;
    type Protocol = NodeMarker;

    /// Wraps an existing channel in a synchronous proxy.
    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    /// Consumes the proxy, returning the underlying channel.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Borrows the underlying channel without consuming the proxy.
    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
12331
12332#[cfg(target_os = "fuchsia")]
12333impl NodeSynchronousProxy {
12334 pub fn new(channel: fidl::Channel) -> Self {
12335 let protocol_name = <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
12336 Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
12337 }
12338
12339 pub fn into_channel(self) -> fidl::Channel {
12340 self.client.into_channel()
12341 }
12342
12343 /// Waits until an event arrives and returns it. It is safe for other
12344 /// threads to make concurrent requests while waiting for an event.
12345 pub fn wait_for_event(&self, deadline: zx::MonotonicInstant) -> Result<NodeEvent, fidl::Error> {
12346 NodeEvent::decode(self.client.wait_for_event(deadline)?)
12347 }
12348
12349 /// Ensure that previous messages have been received server side. This is
12350 /// particularly useful after previous messages that created new tokens,
12351 /// because a token must be known to the sysmem server before sending the
12352 /// token to another participant.
12353 ///
12354 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
12355 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
12356 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
12357 /// to mitigate the possibility of a hostile/fake
12358 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
12359 /// Another way is to pass the token to
12360 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
12361 /// the token as part of exchanging it for a
12362 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
12363 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
12364 /// of stalling.
12365 ///
12366 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
12367 /// and then starting and completing a `Sync`, it's then safe to send the
12368 /// `BufferCollectionToken` client ends to other participants knowing the
12369 /// server will recognize the tokens when they're sent by the other
12370 /// participants to sysmem in a
12371 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
12372 /// efficient way to create tokens while avoiding unnecessary round trips.
12373 ///
12374 /// Other options include waiting for each
12375 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
12376 /// individually (using separate call to `Sync` after each), or calling
12377 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
12378 /// converted to a `BufferCollection` via
12379 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
12380 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
12381 /// the sync step and can create multiple tokens at once.
12382 pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
12383 let _response = self.client.send_query::<
12384 fidl::encoding::EmptyPayload,
12385 fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
12386 >(
12387 (),
12388 0x11ac2555cf575b54,
12389 fidl::encoding::DynamicFlags::FLEXIBLE,
12390 ___deadline,
12391 )?
12392 .into_result::<NodeMarker>("sync")?;
12393 Ok(_response)
12394 }
12395
12396 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
12397 ///
12398 /// Normally a participant will convert a `BufferCollectionToken` into a
12399 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
12400 /// `Release` via the token (and then close the channel immediately or
12401 /// shortly later in response to server closing the server end), which
12402 /// avoids causing buffer collection failure. Without a prior `Release`,
12403 /// closing the `BufferCollectionToken` client end will cause buffer
12404 /// collection failure.
12405 ///
12406 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
12407 ///
12408 /// By default the server handles unexpected closure of a
12409 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
12410 /// first) by failing the buffer collection. Partly this is to expedite
12411 /// closing VMO handles to reclaim memory when any participant fails. If a
12412 /// participant would like to cleanly close a `BufferCollection` without
12413 /// causing buffer collection failure, the participant can send `Release`
12414 /// before closing the `BufferCollection` client end. The `Release` can
12415 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
12416 /// buffer collection won't require constraints from this node in order to
12417 /// allocate. If after `SetConstraints`, the constraints are retained and
12418 /// aggregated, despite the lack of `BufferCollection` connection at the
12419 /// time of constraints aggregation.
12420 ///
12421 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
12422 ///
12423 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
12424 /// end (without `Release` first) will trigger failure of the buffer
12425 /// collection. To close a `BufferCollectionTokenGroup` channel without
12426 /// failing the buffer collection, ensure that AllChildrenPresent() has been
12427 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
12428 /// client end.
12429 ///
12430 /// If `Release` occurs before
12431 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent], the
12432 /// buffer collection will fail (triggered by reception of `Release` without
12433 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
12434 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
12435 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
12436 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
12437 /// close requires `AllChildrenPresent` (if not already sent), then
12438 /// `Release`, then close client end.
12439 ///
12440 /// If `Release` occurs after `AllChildrenPresent`, the children and all
12441 /// their constraints remain intact (just as they would if the
12442 /// `BufferCollectionTokenGroup` channel had remained open), and the client
12443 /// end close doesn't trigger buffer collection failure.
12444 ///
12445 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
12446 ///
12447 /// For brevity, the per-channel-protocol paragraphs above ignore the
12448 /// separate failure domain created by
12449 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
12450 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
12451 /// unexpectedly closes (without `Release` first) and that client end is
12452 /// under a failure domain, instead of failing the whole buffer collection,
12453 /// the failure domain is failed, but the buffer collection itself is
12454 /// isolated from failure of the failure domain. Such failure domains can be
12455 /// nested, in which case only the inner-most failure domain in which the
12456 /// `Node` resides fails.
12457 pub fn r#release(&self) -> Result<(), fidl::Error> {
12458 self.client.send::<fidl::encoding::EmptyPayload>(
12459 (),
12460 0x6a5cae7d6d6e04c6,
12461 fidl::encoding::DynamicFlags::FLEXIBLE,
12462 )
12463 }
12464
12465 /// Set a name for VMOs in this buffer collection.
12466 ///
12467 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
12468 /// will be truncated to fit. The name of the vmo will be suffixed with the
12469 /// buffer index within the collection (if the suffix fits within
12470 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
12471 /// listed in the inspect data.
12472 ///
12473 /// The name only affects VMOs allocated after the name is set; this call
12474 /// does not rename existing VMOs. If multiple clients set different names
12475 /// then the larger priority value will win. Setting a new name with the
12476 /// same priority as a prior name doesn't change the name.
12477 ///
12478 /// All table fields are currently required.
12479 ///
12480 /// + request `priority` The name is only set if this is the first `SetName`
12481 /// or if `priority` is greater than any previous `priority` value in
12482 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
12483 /// + request `name` The name for VMOs created under this buffer collection.
12484 pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
12485 self.client.send::<NodeSetNameRequest>(
12486 payload,
12487 0xb41f1624f48c1e9,
12488 fidl::encoding::DynamicFlags::FLEXIBLE,
12489 )
12490 }
12491
12492 /// Set information about the current client that can be used by sysmem to
12493 /// help diagnose leaking memory and allocation stalls waiting for a
12494 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
12495 ///
12496 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
12497 /// `Node`(s) derived from this `Node`, unless overriden by
12498 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
12499 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
12500 ///
12501 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
12502 /// `Allocator` is the most efficient way to ensure that all
12503 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
12504 /// set, and is also more efficient than separately sending the same debug
12505 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
12506 /// created [`fuchsia.sysmem2/Node`].
12507 ///
12508 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
12509 /// indicate which client is closing their channel first, leading to subtree
12510 /// failure (which can be normal if the purpose of the subtree is over, but
12511 /// if happening earlier than expected, the client-channel-specific name can
12512 /// help diagnose where the failure is first coming from, from sysmem's
12513 /// point of view).
12514 ///
12515 /// All table fields are currently required.
12516 ///
12517 /// + request `name` This can be an arbitrary string, but the current
12518 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
12519 /// + request `id` This can be an arbitrary id, but the current process ID
12520 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
12521 pub fn r#set_debug_client_info(
12522 &self,
12523 mut payload: &NodeSetDebugClientInfoRequest,
12524 ) -> Result<(), fidl::Error> {
12525 self.client.send::<NodeSetDebugClientInfoRequest>(
12526 payload,
12527 0x5cde8914608d99b1,
12528 fidl::encoding::DynamicFlags::FLEXIBLE,
12529 )
12530 }
12531
12532 /// Sysmem logs a warning if sysmem hasn't seen
12533 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
12534 /// within 5 seconds after creation of a new collection.
12535 ///
12536 /// Clients can call this method to change when the log is printed. If
12537 /// multiple client set the deadline, it's unspecified which deadline will
12538 /// take effect.
12539 ///
12540 /// In most cases the default works well.
12541 ///
12542 /// All table fields are currently required.
12543 ///
12544 /// + request `deadline` The time at which sysmem will start trying to log
12545 /// the warning, unless all constraints are with sysmem by then.
12546 pub fn r#set_debug_timeout_log_deadline(
12547 &self,
12548 mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
12549 ) -> Result<(), fidl::Error> {
12550 self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
12551 payload,
12552 0x716b0af13d5c0806,
12553 fidl::encoding::DynamicFlags::FLEXIBLE,
12554 )
12555 }
12556
12557 /// This enables verbose logging for the buffer collection.
12558 ///
12559 /// Verbose logging includes constraints set via
12560 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
12561 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
12562 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
12563 /// the tree of `Node`(s).
12564 ///
12565 /// Normally sysmem prints only a single line complaint when aggregation
12566 /// fails, with just the specific detailed reason that aggregation failed,
12567 /// with little surrounding context. While this is often enough to diagnose
12568 /// a problem if only a small change was made and everything was working
12569 /// before the small change, it's often not particularly helpful for getting
12570 /// a new buffer collection to work for the first time. Especially with
12571 /// more complex trees of nodes, involving things like
12572 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
12573 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
12574 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
12575 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
12576 /// looks like and why it's failing a logical allocation, or why a tree or
12577 /// subtree is failing sooner than expected.
12578 ///
12579 /// The intent of the extra logging is to be acceptable from a performance
12580 /// point of view, under the assumption that verbose logging is only enabled
12581 /// on a low number of buffer collections. If we're not tracking down a bug,
12582 /// we shouldn't send this message.
12583 pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
12584 self.client.send::<fidl::encoding::EmptyPayload>(
12585 (),
12586 0x5209c77415b4dfad,
12587 fidl::encoding::DynamicFlags::FLEXIBLE,
12588 )
12589 }
12590
12591 /// This gets a handle that can be used as a parameter to
12592 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
12593 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
12594 /// client obtained this handle from this `Node`.
12595 ///
12596 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
12597 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
12598 /// despite the two calls typically being on different channels.
12599 ///
12600 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
12601 ///
12602 /// All table fields are currently required.
12603 ///
12604 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
12605 /// different `Node` channel, to prove that the client obtained the handle
12606 /// from this `Node`.
12607 pub fn r#get_node_ref(
12608 &self,
12609 ___deadline: zx::MonotonicInstant,
12610 ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
12611 let _response = self.client.send_query::<
12612 fidl::encoding::EmptyPayload,
12613 fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
12614 >(
12615 (),
12616 0x5b3d0e51614df053,
12617 fidl::encoding::DynamicFlags::FLEXIBLE,
12618 ___deadline,
12619 )?
12620 .into_result::<NodeMarker>("get_node_ref")?;
12621 Ok(_response)
12622 }
12623
12624 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
12625 /// rooted at a different child token of a common parent
12626 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
12627 /// passed-in `node_ref`.
12628 ///
12629 /// This call is for assisting with admission control de-duplication, and
12630 /// with debugging.
12631 ///
12632 /// The `node_ref` must be obtained using
12633 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
12634 ///
12635 /// The `node_ref` can be a duplicated handle; it's not necessary to call
12636 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
12637 ///
12638 /// If a calling token may not actually be a valid token at all due to a
12639 /// potentially hostile/untrusted provider of the token, call
12640 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
12641 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
12642 /// never responds due to a calling token not being a real token (not really
12643 /// talking to sysmem). Another option is to call
12644 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
12645 /// which also validates the token along with converting it to a
12646 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
12647 ///
12648 /// All table fields are currently required.
12649 ///
12650 /// - response `is_alternate`
12651 /// - true: The first parent node in common between the calling node and
12652 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
12653 /// that the calling `Node` and the `node_ref` `Node` will not have both
12654 /// their constraints apply - rather sysmem will choose one or the other
12655 /// of the constraints - never both. This is because only one child of
12656 /// a `BufferCollectionTokenGroup` is selected during logical
12657 /// allocation, with only that one child's subtree contributing to
12658 /// constraints aggregation.
12659 /// - false: The first parent node in common between the calling `Node`
12660 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
12661 /// Currently, this means the first parent node in common is a
12662 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
12663 /// `Release`ed). This means that the calling `Node` and the `node_ref`
12664 /// `Node` may have both their constraints apply during constraints
12665 /// aggregation of the logical allocation, if both `Node`(s) are
12666 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
12667 /// this case, there is no `BufferCollectionTokenGroup` that will
12668 /// directly prevent the two `Node`(s) from both being selected and
12669 /// their constraints both aggregated, but even when false, one or both
12670 /// `Node`(s) may still be eliminated from consideration if one or both
12671 /// `Node`(s) has a direct or indirect parent
12672 /// `BufferCollectionTokenGroup` which selects a child subtree other
12673 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
12674 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
12675 /// associated with the same buffer collection as the calling `Node`.
12676 /// Another reason for this error is if the `node_ref` is an
12677 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
12678 /// a real `node_ref` obtained from `GetNodeRef`.
12679 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    /// `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
12681 /// the needed rights expected on a real `node_ref`.
12682 /// * No other failing status codes are returned by this call. However,
12683 /// sysmem may add additional codes in future, so the client should have
12684 /// sensible default handling for any failing status code.
12685 pub fn r#is_alternate_for(
12686 &self,
12687 mut payload: NodeIsAlternateForRequest,
12688 ___deadline: zx::MonotonicInstant,
12689 ) -> Result<NodeIsAlternateForResult, fidl::Error> {
12690 let _response = self.client.send_query::<
12691 NodeIsAlternateForRequest,
12692 fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
12693 >(
12694 &mut payload,
12695 0x3a58e00157e0825,
12696 fidl::encoding::DynamicFlags::FLEXIBLE,
12697 ___deadline,
12698 )?
12699 .into_result::<NodeMarker>("is_alternate_for")?;
12700 Ok(_response.map(|x| x))
12701 }
12702
12703 /// Get the buffer collection ID. This ID is also available from
12704 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
12705 /// within the collection).
12706 ///
12707 /// This call is mainly useful in situations where we can't convey a
12708 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
12709 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
12710 /// handle, which can be joined back up with a `BufferCollection` client end
12711 /// that was created via a different path. Prefer to convey a
12712 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
12713 ///
12714 /// Trusting a `buffer_collection_id` value from a source other than sysmem
12715 /// is analogous to trusting a koid value from a source other than zircon.
12716 /// Both should be avoided unless really necessary, and both require
12717 /// caution. In some situations it may be reasonable to refer to a
12718 /// pre-established `BufferCollection` by `buffer_collection_id` via a
12719 /// protocol for efficiency reasons, but an incoming value purporting to be
12720 /// a `buffer_collection_id` is not sufficient alone to justify granting the
12721 /// sender of the `buffer_collection_id` any capability. The sender must
12722 /// first prove to a receiver that the sender has/had a VMO or has/had a
12723 /// `BufferCollectionToken` to the same collection by sending a handle that
12724 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
12725 /// `buffer_collection_id` value. The receiver should take care to avoid
12726 /// assuming that a sender had a `BufferCollectionToken` in cases where the
12727 /// sender has only proven that the sender had a VMO.
12728 ///
12729 /// - response `buffer_collection_id` This ID is unique per buffer
12730 /// collection per boot. Each buffer is uniquely identified by the
12731 /// `buffer_collection_id` and `buffer_index` together.
    pub fn r#get_buffer_collection_id(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
        // Two-way call with an empty request payload; blocks until the reply
        // arrives or `___deadline` passes.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
        >(
            (),
            0x77d19a494b78ba8c, // wire ordinal for this method
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<NodeMarker>("get_buffer_collection_id")?;
        Ok(_response)
    }
12748
12749 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
12750 /// created after this message to weak, which means that a client's `Node`
12751 /// client end (or a child created after this message) is not alone
12752 /// sufficient to keep allocated VMOs alive.
12753 ///
12754 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
12755 /// `close_weak_asap`.
12756 ///
12757 /// This message is only permitted before the `Node` becomes ready for
12758 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
12759 /// * `BufferCollectionToken`: any time
12760 /// * `BufferCollection`: before `SetConstraints`
12761 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
12762 ///
12763 /// Currently, no conversion from strong `Node` to weak `Node` after ready
12764 /// for allocation is provided, but a client can simulate that by creating
12765 /// an additional `Node` before allocation and setting that additional
12766 /// `Node` to weak, and then potentially at some point later sending
12767 /// `Release` and closing the client end of the client's strong `Node`, but
12768 /// keeping the client's weak `Node`.
12769 ///
12770 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
12771 /// collection failure (all `Node` client end(s) will see
12772 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
12773 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
12774 /// this situation until all `Node`(s) are ready for allocation. For initial
12775 /// allocation to succeed, at least one strong `Node` is required to exist
12776 /// at allocation time, but after that client receives VMO handles, that
12777 /// client can `BufferCollection.Release` and close the client end without
12778 /// causing this type of failure.
12779 ///
12780 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
12781 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
12782 /// separately as appropriate.
12783 pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
12784 self.client.send::<fidl::encoding::EmptyPayload>(
12785 (),
12786 0x22dd3ea514eeffe1,
12787 fidl::encoding::DynamicFlags::FLEXIBLE,
12788 )
12789 }
12790
12791 /// This indicates to sysmem that the client is prepared to pay attention to
12792 /// `close_weak_asap`.
12793 ///
12794 /// If sent, this message must be before
12795 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
12796 ///
12797 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
12798 /// send this message before `WaitForAllBuffersAllocated`, or a parent
12799 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
12800 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
12801 /// trigger buffer collection failure.
12802 ///
12803 /// This message is necessary because weak sysmem VMOs have not always been
12804 /// a thing, so older clients are not aware of the need to pay attention to
12805 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
12806 /// sysmem weak VMO handles asap. By having this message and requiring
12807 /// participants to indicate their acceptance of this aspect of the overall
12808 /// protocol, we avoid situations where an older client is delivered a weak
12809 /// VMO without any way for sysmem to get that VMO to close quickly later
12810 /// (and on a per-buffer basis).
12811 ///
12812 /// A participant that doesn't handle `close_weak_asap` and also doesn't
12813 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
12814 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
12815 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
12816 /// same participant has a child/delegate which does retrieve VMOs, that
12817 /// child/delegate will need to send `SetWeakOk` before
12818 /// `WaitForAllBuffersAllocated`.
12819 ///
12820 /// + request `for_child_nodes_also` If present and true, this means direct
12821 /// child nodes of this node created after this message plus all
12822 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
12823 /// those nodes. Any child node of this node that was created before this
12824 /// message is not included. This setting is "sticky" in the sense that a
12825 /// subsequent `SetWeakOk` without this bool set to true does not reset
12826 /// the server-side bool. If this creates a problem for a participant, a
12827 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
12828 /// tokens instead, as appropriate. A participant should only set
12829 /// `for_child_nodes_also` true if the participant can really promise to
12830 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
12831 /// weak VMO handles held by participants holding the corresponding child
12832 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
12833 /// which are using sysmem(1) can be weak, despite the clients of those
12834 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
12835 /// direct way to find out about `close_weak_asap`. This only applies to
12836 /// descendents of this `Node` which are using sysmem(1), not to this
12837 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
12838 /// token, which will fail allocation unless an ancestor of this `Node`
12839 /// specified `for_child_nodes_also` true.
12840 pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
12841 self.client.send::<NodeSetWeakOkRequest>(
12842 &mut payload,
12843 0x38a44fc4d7724be9,
12844 fidl::encoding::DynamicFlags::FLEXIBLE,
12845 )
12846 }
12847
    /// The server_end will be closed after this `Node` and any child nodes have
    /// released their buffer counts, making those counts available for
12850 /// reservation by a different `Node` via
12851 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
12852 ///
12853 /// The `Node` buffer counts may not be released until the entire tree of
12854 /// `Node`(s) is closed or failed, because
12855 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
12856 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
12857 /// `Node` buffer counts remain reserved until the orphaned node is later
12858 /// cleaned up.
12859 ///
12860 /// If the `Node` exceeds a fairly large number of attached eventpair server
12861 /// ends, a log message will indicate this and the `Node` (and the
12862 /// appropriate) sub-tree will fail.
12863 ///
12864 /// The `server_end` will remain open when
12865 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
12866 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
12867 /// [`fuchsia.sysmem2/BufferCollection`].
12868 ///
12869 /// This message can also be used with a
12870 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
    pub fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message: encodes `payload` and sends without waiting for a reply.
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac, // wire ordinal for this method
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
12881}
12882
#[cfg(target_os = "fuchsia")]
impl From<NodeSynchronousProxy> for zx::NullableHandle {
    // Consuming conversion: extract the proxy's underlying channel, then widen
    // it into a generic (nullable) handle.
    fn from(value: NodeSynchronousProxy) -> Self {
        value.into_channel().into()
    }
}
12889
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for NodeSynchronousProxy {
    /// Wrap a raw channel in a synchronous proxy for `fuchsia.sysmem2/Node`.
    fn from(value: fidl::Channel) -> Self {
        NodeSynchronousProxy::new(value)
    }
}
12896
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for NodeSynchronousProxy {
    type Protocol = NodeMarker;

    // Build a synchronous proxy from a typed client end by unwrapping its channel.
    fn from_client(value: fidl::endpoints::ClientEnd<NodeMarker>) -> Self {
        Self::new(value.into_channel())
    }
}
12905
/// Asynchronous client proxy for the `fuchsia.sysmem2/Node` protocol.
#[derive(Debug, Clone)]
pub struct NodeProxy {
    // Untyped async FIDL client; all protocol methods delegate through it.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
12910
impl fidl::endpoints::Proxy for NodeProxy {
    type Protocol = NodeMarker;

    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
        Self::new(inner)
    }

    // NOTE(review): `into_channel` on the inner client can fail; on failure the
    // proxy is reconstituted from the returned client and handed back as `Err`,
    // so the caller keeps a usable proxy either way.
    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
        self.client.into_channel().map_err(|client| Self { client })
    }

    fn as_channel(&self) -> &::fidl::AsyncChannel {
        self.client.as_channel()
    }
}
12926
12927impl NodeProxy {
    /// Create a new Proxy for fuchsia.sysmem2/Node.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        // The protocol's DEBUG_NAME is passed to the client, presumably for
        // labeling diagnostics — confirm against `fidl::client::Client::new`.
        let protocol_name = <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }
12933
12934 /// Get a Stream of events from the remote end of the protocol.
12935 ///
12936 /// # Panics
12937 ///
12938 /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> NodeEventStream {
        // The underlying event receiver can only be taken once; per the doc
        // comment above, a second call panics.
        NodeEventStream { event_receiver: self.client.take_event_receiver() }
    }
12942
12943 /// Ensure that previous messages have been received server side. This is
12944 /// particularly useful after previous messages that created new tokens,
12945 /// because a token must be known to the sysmem server before sending the
12946 /// token to another participant.
12947 ///
12948 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
12949 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
12950 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
12951 /// to mitigate the possibility of a hostile/fake
12952 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
12953 /// Another way is to pass the token to
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
12955 /// the token as part of exchanging it for a
12956 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
12957 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
12958 /// of stalling.
12959 ///
12960 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
12961 /// and then starting and completing a `Sync`, it's then safe to send the
12962 /// `BufferCollectionToken` client ends to other participants knowing the
12963 /// server will recognize the tokens when they're sent by the other
12964 /// participants to sysmem in a
12965 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
12966 /// efficient way to create tokens while avoiding unnecessary round trips.
12967 ///
12968 /// Other options include waiting for each
12969 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
12970 /// individually (using separate call to `Sync` after each), or calling
12971 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
12972 /// converted to a `BufferCollection` via
12973 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
12974 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
12975 /// the sync step and can create multiple tokens at once.
    pub fn r#sync(
        &self,
    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
        // Delegates to the trait implementation; the returned future resolves
        // once the server has replied.
        NodeProxyInterface::r#sync(self)
    }
12981
12982 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
12983 ///
12984 /// Normally a participant will convert a `BufferCollectionToken` into a
12985 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
12986 /// `Release` via the token (and then close the channel immediately or
12987 /// shortly later in response to server closing the server end), which
12988 /// avoids causing buffer collection failure. Without a prior `Release`,
12989 /// closing the `BufferCollectionToken` client end will cause buffer
12990 /// collection failure.
12991 ///
12992 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
12993 ///
12994 /// By default the server handles unexpected closure of a
12995 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
12996 /// first) by failing the buffer collection. Partly this is to expedite
12997 /// closing VMO handles to reclaim memory when any participant fails. If a
12998 /// participant would like to cleanly close a `BufferCollection` without
12999 /// causing buffer collection failure, the participant can send `Release`
13000 /// before closing the `BufferCollection` client end. The `Release` can
13001 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
13002 /// buffer collection won't require constraints from this node in order to
13003 /// allocate. If after `SetConstraints`, the constraints are retained and
13004 /// aggregated, despite the lack of `BufferCollection` connection at the
13005 /// time of constraints aggregation.
13006 ///
13007 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
13008 ///
13009 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
13010 /// end (without `Release` first) will trigger failure of the buffer
13011 /// collection. To close a `BufferCollectionTokenGroup` channel without
13012 /// failing the buffer collection, ensure that AllChildrenPresent() has been
13013 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
13014 /// client end.
13015 ///
13016 /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
13018 /// buffer collection will fail (triggered by reception of `Release` without
13019 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
13020 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
13021 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
13022 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
13023 /// close requires `AllChildrenPresent` (if not already sent), then
13024 /// `Release`, then close client end.
13025 ///
13026 /// If `Release` occurs after `AllChildrenPresent`, the children and all
13027 /// their constraints remain intact (just as they would if the
13028 /// `BufferCollectionTokenGroup` channel had remained open), and the client
13029 /// end close doesn't trigger buffer collection failure.
13030 ///
13031 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
13032 ///
13033 /// For brevity, the per-channel-protocol paragraphs above ignore the
13034 /// separate failure domain created by
13035 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
13036 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
13037 /// unexpectedly closes (without `Release` first) and that client end is
13038 /// under a failure domain, instead of failing the whole buffer collection,
13039 /// the failure domain is failed, but the buffer collection itself is
13040 /// isolated from failure of the failure domain. Such failure domains can be
13041 /// nested, in which case only the inner-most failure domain in which the
13042 /// `Node` resides fails.
    pub fn r#release(&self) -> Result<(), fidl::Error> {
        // One-way message; delegates to the trait implementation.
        NodeProxyInterface::r#release(self)
    }
13046
13047 /// Set a name for VMOs in this buffer collection.
13048 ///
13049 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
13050 /// will be truncated to fit. The name of the vmo will be suffixed with the
13051 /// buffer index within the collection (if the suffix fits within
13052 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
13053 /// listed in the inspect data.
13054 ///
13055 /// The name only affects VMOs allocated after the name is set; this call
13056 /// does not rename existing VMOs. If multiple clients set different names
13057 /// then the larger priority value will win. Setting a new name with the
13058 /// same priority as a prior name doesn't change the name.
13059 ///
13060 /// All table fields are currently required.
13061 ///
13062 /// + request `priority` The name is only set if this is the first `SetName`
13063 /// or if `priority` is greater than any previous `priority` value in
13064 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
13065 /// + request `name` The name for VMOs created under this buffer collection.
    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        // One-way message; delegates to the trait implementation.
        NodeProxyInterface::r#set_name(self, payload)
    }
13069
13070 /// Set information about the current client that can be used by sysmem to
13071 /// help diagnose leaking memory and allocation stalls waiting for a
13072 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
13073 ///
13074 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
13075 /// `Node`(s) derived from this `Node`, unless overriden by
13076 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
13077 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
13078 ///
13079 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
13080 /// `Allocator` is the most efficient way to ensure that all
13081 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
13082 /// set, and is also more efficient than separately sending the same debug
13083 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
13084 /// created [`fuchsia.sysmem2/Node`].
13085 ///
13086 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
13087 /// indicate which client is closing their channel first, leading to subtree
13088 /// failure (which can be normal if the purpose of the subtree is over, but
13089 /// if happening earlier than expected, the client-channel-specific name can
13090 /// help diagnose where the failure is first coming from, from sysmem's
13091 /// point of view).
13092 ///
13093 /// All table fields are currently required.
13094 ///
13095 /// + request `name` This can be an arbitrary string, but the current
13096 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
13097 /// + request `id` This can be an arbitrary id, but the current process ID
13098 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message; delegates to the trait implementation.
        NodeProxyInterface::r#set_debug_client_info(self, payload)
    }
13105
13106 /// Sysmem logs a warning if sysmem hasn't seen
13107 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
13108 /// within 5 seconds after creation of a new collection.
13109 ///
13110 /// Clients can call this method to change when the log is printed. If
13111 /// multiple client set the deadline, it's unspecified which deadline will
13112 /// take effect.
13113 ///
13114 /// In most cases the default works well.
13115 ///
13116 /// All table fields are currently required.
13117 ///
13118 /// + request `deadline` The time at which sysmem will start trying to log
13119 /// the warning, unless all constraints are with sysmem by then.
    pub fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message; delegates to the trait implementation.
        NodeProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
    }
13126
13127 /// This enables verbose logging for the buffer collection.
13128 ///
13129 /// Verbose logging includes constraints set via
13130 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
13131 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
13132 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
13133 /// the tree of `Node`(s).
13134 ///
13135 /// Normally sysmem prints only a single line complaint when aggregation
13136 /// fails, with just the specific detailed reason that aggregation failed,
13137 /// with little surrounding context. While this is often enough to diagnose
13138 /// a problem if only a small change was made and everything was working
13139 /// before the small change, it's often not particularly helpful for getting
13140 /// a new buffer collection to work for the first time. Especially with
13141 /// more complex trees of nodes, involving things like
13142 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
13143 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
13144 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
13145 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
13146 /// looks like and why it's failing a logical allocation, or why a tree or
13147 /// subtree is failing sooner than expected.
13148 ///
13149 /// The intent of the extra logging is to be acceptable from a performance
13150 /// point of view, under the assumption that verbose logging is only enabled
13151 /// on a low number of buffer collections. If we're not tracking down a bug,
13152 /// we shouldn't send this message.
    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        // One-way message; delegates to the trait implementation.
        NodeProxyInterface::r#set_verbose_logging(self)
    }
13156
13157 /// This gets a handle that can be used as a parameter to
13158 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
13159 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
13160 /// client obtained this handle from this `Node`.
13161 ///
13162 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
13163 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
13164 /// despite the two calls typically being on different channels.
13165 ///
13166 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
13167 ///
13168 /// All table fields are currently required.
13169 ///
13170 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
13171 /// different `Node` channel, to prove that the client obtained the handle
13172 /// from this `Node`.
    pub fn r#get_node_ref(
        &self,
    ) -> fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Two-way call; delegates to the trait implementation and returns the
        // in-flight query future.
        NodeProxyInterface::r#get_node_ref(self)
    }
13181
13182 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
13183 /// rooted at a different child token of a common parent
13184 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
13185 /// passed-in `node_ref`.
13186 ///
13187 /// This call is for assisting with admission control de-duplication, and
13188 /// with debugging.
13189 ///
13190 /// The `node_ref` must be obtained using
13191 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
13192 ///
13193 /// The `node_ref` can be a duplicated handle; it's not necessary to call
13194 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
13195 ///
13196 /// If a calling token may not actually be a valid token at all due to a
13197 /// potentially hostile/untrusted provider of the token, call
13198 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
13199 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
13200 /// never responds due to a calling token not being a real token (not really
13201 /// talking to sysmem). Another option is to call
13202 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
13203 /// which also validates the token along with converting it to a
13204 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
13205 ///
13206 /// All table fields are currently required.
13207 ///
13208 /// - response `is_alternate`
13209 /// - true: The first parent node in common between the calling node and
13210 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
13211 /// that the calling `Node` and the `node_ref` `Node` will not have both
13212 /// their constraints apply - rather sysmem will choose one or the other
13213 /// of the constraints - never both. This is because only one child of
13214 /// a `BufferCollectionTokenGroup` is selected during logical
13215 /// allocation, with only that one child's subtree contributing to
13216 /// constraints aggregation.
13217 /// - false: The first parent node in common between the calling `Node`
13218 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
13219 /// Currently, this means the first parent node in common is a
13220 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
13221 /// `Release`ed). This means that the calling `Node` and the `node_ref`
13222 /// `Node` may have both their constraints apply during constraints
13223 /// aggregation of the logical allocation, if both `Node`(s) are
13224 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
13225 /// this case, there is no `BufferCollectionTokenGroup` that will
13226 /// directly prevent the two `Node`(s) from both being selected and
13227 /// their constraints both aggregated, but even when false, one or both
13228 /// `Node`(s) may still be eliminated from consideration if one or both
13229 /// `Node`(s) has a direct or indirect parent
13230 /// `BufferCollectionTokenGroup` which selects a child subtree other
13231 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
13232 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
13233 /// associated with the same buffer collection as the calling `Node`.
13234 /// Another reason for this error is if the `node_ref` is an
13235 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
13236 /// a real `node_ref` obtained from `GetNodeRef`.
13237 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    /// `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
13239 /// the needed rights expected on a real `node_ref`.
13240 /// * No other failing status codes are returned by this call. However,
13241 /// sysmem may add additional codes in future, so the client should have
13242 /// sensible default handling for any failing status code.
    pub fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Two-way call; delegates to the trait implementation and returns the
        // in-flight query future.
        NodeProxyInterface::r#is_alternate_for(self, payload)
    }
13252
13253 /// Get the buffer collection ID. This ID is also available from
13254 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
13255 /// within the collection).
13256 ///
13257 /// This call is mainly useful in situations where we can't convey a
13258 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
13259 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
13260 /// handle, which can be joined back up with a `BufferCollection` client end
13261 /// that was created via a different path. Prefer to convey a
13262 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
13263 ///
13264 /// Trusting a `buffer_collection_id` value from a source other than sysmem
13265 /// is analogous to trusting a koid value from a source other than zircon.
13266 /// Both should be avoided unless really necessary, and both require
13267 /// caution. In some situations it may be reasonable to refer to a
13268 /// pre-established `BufferCollection` by `buffer_collection_id` via a
13269 /// protocol for efficiency reasons, but an incoming value purporting to be
13270 /// a `buffer_collection_id` is not sufficient alone to justify granting the
13271 /// sender of the `buffer_collection_id` any capability. The sender must
13272 /// first prove to a receiver that the sender has/had a VMO or has/had a
13273 /// `BufferCollectionToken` to the same collection by sending a handle that
13274 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
13275 /// `buffer_collection_id` value. The receiver should take care to avoid
13276 /// assuming that a sender had a `BufferCollectionToken` in cases where the
13277 /// sender has only proven that the sender had a VMO.
13278 ///
13279 /// - response `buffer_collection_id` This ID is unique per buffer
13280 /// collection per boot. Each buffer is uniquely identified by the
13281 /// `buffer_collection_id` and `buffer_index` together.
13282 pub fn r#get_buffer_collection_id(
13283 &self,
13284 ) -> fidl::client::QueryResponseFut<
13285 NodeGetBufferCollectionIdResponse,
13286 fidl::encoding::DefaultFuchsiaResourceDialect,
13287 > {
13288 NodeProxyInterface::r#get_buffer_collection_id(self)
13289 }
13290
13291 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
13292 /// created after this message to weak, which means that a client's `Node`
13293 /// client end (or a child created after this message) is not alone
13294 /// sufficient to keep allocated VMOs alive.
13295 ///
13296 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
13297 /// `close_weak_asap`.
13298 ///
13299 /// This message is only permitted before the `Node` becomes ready for
13300 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
13301 /// * `BufferCollectionToken`: any time
13302 /// * `BufferCollection`: before `SetConstraints`
13303 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
13304 ///
13305 /// Currently, no conversion from strong `Node` to weak `Node` after ready
13306 /// for allocation is provided, but a client can simulate that by creating
13307 /// an additional `Node` before allocation and setting that additional
13308 /// `Node` to weak, and then potentially at some point later sending
13309 /// `Release` and closing the client end of the client's strong `Node`, but
13310 /// keeping the client's weak `Node`.
13311 ///
13312 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
13313 /// collection failure (all `Node` client end(s) will see
13314 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
13315 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
13316 /// this situation until all `Node`(s) are ready for allocation. For initial
13317 /// allocation to succeed, at least one strong `Node` is required to exist
13318 /// at allocation time, but after that client receives VMO handles, that
13319 /// client can `BufferCollection.Release` and close the client end without
13320 /// causing this type of failure.
13321 ///
13322 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
13323 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
13324 /// separately as appropriate.
13325 pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
13326 NodeProxyInterface::r#set_weak(self)
13327 }
13328
13329 /// This indicates to sysmem that the client is prepared to pay attention to
13330 /// `close_weak_asap`.
13331 ///
13332 /// If sent, this message must be before
13333 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
13334 ///
13335 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
13336 /// send this message before `WaitForAllBuffersAllocated`, or a parent
13337 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
13338 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
13339 /// trigger buffer collection failure.
13340 ///
13341 /// This message is necessary because weak sysmem VMOs have not always been
13342 /// a thing, so older clients are not aware of the need to pay attention to
13343 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
13344 /// sysmem weak VMO handles asap. By having this message and requiring
13345 /// participants to indicate their acceptance of this aspect of the overall
13346 /// protocol, we avoid situations where an older client is delivered a weak
13347 /// VMO without any way for sysmem to get that VMO to close quickly later
13348 /// (and on a per-buffer basis).
13349 ///
13350 /// A participant that doesn't handle `close_weak_asap` and also doesn't
13351 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
13352 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
13353 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
13354 /// same participant has a child/delegate which does retrieve VMOs, that
13355 /// child/delegate will need to send `SetWeakOk` before
13356 /// `WaitForAllBuffersAllocated`.
13357 ///
13358 /// + request `for_child_nodes_also` If present and true, this means direct
13359 /// child nodes of this node created after this message plus all
13360 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
13361 /// those nodes. Any child node of this node that was created before this
13362 /// message is not included. This setting is "sticky" in the sense that a
13363 /// subsequent `SetWeakOk` without this bool set to true does not reset
13364 /// the server-side bool. If this creates a problem for a participant, a
13365 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
13366 /// tokens instead, as appropriate. A participant should only set
13367 /// `for_child_nodes_also` true if the participant can really promise to
13368 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
13369 /// weak VMO handles held by participants holding the corresponding child
13370 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
13371 /// which are using sysmem(1) can be weak, despite the clients of those
13372 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
13373 /// direct way to find out about `close_weak_asap`. This only applies to
13374 /// descendents of this `Node` which are using sysmem(1), not to this
13375 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
13376 /// token, which will fail allocation unless an ancestor of this `Node`
13377 /// specified `for_child_nodes_also` true.
13378 pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
13379 NodeProxyInterface::r#set_weak_ok(self, payload)
13380 }
13381
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
13384 /// reservation by a different `Node` via
13385 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
13386 ///
13387 /// The `Node` buffer counts may not be released until the entire tree of
13388 /// `Node`(s) is closed or failed, because
13389 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
13390 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
13391 /// `Node` buffer counts remain reserved until the orphaned node is later
13392 /// cleaned up.
13393 ///
13394 /// If the `Node` exceeds a fairly large number of attached eventpair server
13395 /// ends, a log message will indicate this and the `Node` (and the
13396 /// appropriate) sub-tree will fail.
13397 ///
13398 /// The `server_end` will remain open when
13399 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
13400 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
13401 /// [`fuchsia.sysmem2/BufferCollection`].
13402 ///
13403 /// This message can also be used with a
13404 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
13405 pub fn r#attach_node_tracking(
13406 &self,
13407 mut payload: NodeAttachNodeTrackingRequest,
13408 ) -> Result<(), fidl::Error> {
13409 NodeProxyInterface::r#attach_node_tracking(self, payload)
13410 }
13411}
13412
// Wire-level implementation of the Node protocol for `NodeProxy`. One-way
// methods encode and send a single message; two-way methods send a query and
// decode the reply with a nested `_decode` helper keyed to the same ordinal.
// All messages are sent with `DynamicFlags::FLEXIBLE`.
impl NodeProxyInterface for NodeProxy {
    type SyncResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    // Two-way `Sync` (ordinal 0x11ac2555cf575b54): empty request, empty
    // flexible response.
    fn r#sync(&self) -> Self::SyncResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x11ac2555cf575b54,
            >(_buf?)?
            .into_result::<NodeMarker>("sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way `Release` (ordinal 0x6a5cae7d6d6e04c6): no payload, no reply.
    fn r#release(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetName` (ordinal 0xb41f1624f48c1e9).
    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetDebugClientInfo` (ordinal 0x5cde8914608d99b1).
    fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetDebugTimeoutLogDeadline` (ordinal 0x716b0af13d5c0806).
    fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetVerboseLogging` (ordinal 0x5209c77415b4dfad).
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `GetNodeRef` (ordinal 0x5b3d0e51614df053): empty request,
    // table response.
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b3d0e51614df053,
            >(_buf?)?
            .into_result::<NodeMarker>("get_node_ref")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `IsAlternateFor` (ordinal 0x3a58e00157e0825): result union
    // response (`FlexibleResultType`), so the decoded value is a `Result`.
    fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x3a58e00157e0825,
            >(_buf?)?
            .into_result::<NodeMarker>("is_alternate_for")?;
            // `map(|x| x)` is an identity; kept as emitted by the generator.
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `GetBufferCollectionId` (ordinal 0x77d19a494b78ba8c).
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x77d19a494b78ba8c,
            >(_buf?)?
            .into_result::<NodeMarker>("get_buffer_collection_id")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            NodeGetBufferCollectionIdResponse,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way `SetWeak` (ordinal 0x22dd3ea514eeffe1).
    fn r#set_weak(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetWeakOk` (ordinal 0x38a44fc4d7724be9). Takes the payload by
    // value because encoding may consume resources (handles) within it.
    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `AttachNodeTracking` (ordinal 0x3f22f2a293d3cdac). Takes the
    // payload by value for the same reason as `SetWeakOk`.
    fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
13587
/// Stream of incoming events for fuchsia.sysmem2/Node, decoded from the
/// client's event receiver as [`NodeEvent`] values.
pub struct NodeEventStream {
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
13591
// Allow `NodeEventStream` to be used without pinning.
impl std::marker::Unpin for NodeEventStream {}
13593
impl futures::stream::FusedStream for NodeEventStream {
    /// Reports termination by delegating to the underlying event receiver.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
13599
13600impl futures::Stream for NodeEventStream {
13601 type Item = Result<NodeEvent, fidl::Error>;
13602
13603 fn poll_next(
13604 mut self: std::pin::Pin<&mut Self>,
13605 cx: &mut std::task::Context<'_>,
13606 ) -> std::task::Poll<Option<Self::Item>> {
13607 match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
13608 &mut self.event_receiver,
13609 cx
13610 )?) {
13611 Some(buf) => std::task::Poll::Ready(Some(NodeEvent::decode(buf))),
13612 None => std::task::Poll::Ready(None),
13613 }
13614 }
13615}
13616
/// An event received on a fuchsia.sysmem2/Node channel. This protocol defines
/// no strict events, so only unknown (flexible) events are representable.
#[derive(Debug)]
pub enum NodeEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
13625
13626impl NodeEvent {
13627 /// Decodes a message buffer as a [`NodeEvent`].
13628 fn decode(
13629 mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
13630 ) -> Result<NodeEvent, fidl::Error> {
13631 let (bytes, _handles) = buf.split_mut();
13632 let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
13633 debug_assert_eq!(tx_header.tx_id, 0);
13634 match tx_header.ordinal {
13635 _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
13636 Ok(NodeEvent::_UnknownEvent { ordinal: tx_header.ordinal })
13637 }
13638 _ => Err(fidl::Error::UnknownOrdinal {
13639 ordinal: tx_header.ordinal,
13640 protocol_name: <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
13641 }),
13642 }
13643 }
13644}
13645
/// A Stream of incoming requests for fuchsia.sysmem2/Node.
pub struct NodeRequestStream {
    // Shared server-side serve state (channel + shutdown signal), also held
    // by control handles minted from this stream.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the stream has yielded `None`; polling again panics.
    is_terminated: bool,
}
13651
// Allow `NodeRequestStream` to be used without pinning.
impl std::marker::Unpin for NodeRequestStream {}
13653
impl futures::stream::FusedStream for NodeRequestStream {
    /// Reports whether the stream has already yielded `None` (see `poll_next`).
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
13659
13660impl fidl::endpoints::RequestStream for NodeRequestStream {
13661 type Protocol = NodeMarker;
13662 type ControlHandle = NodeControlHandle;
13663
13664 fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
13665 Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
13666 }
13667
13668 fn control_handle(&self) -> Self::ControlHandle {
13669 NodeControlHandle { inner: self.inner.clone() }
13670 }
13671
13672 fn into_inner(
13673 self,
13674 ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
13675 {
13676 (self.inner, self.is_terminated)
13677 }
13678
13679 fn from_inner(
13680 inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
13681 is_terminated: bool,
13682 ) -> Self {
13683 Self { inner, is_terminated }
13684 }
13685}
13686
// Server-side request pump: reads one message from the channel per poll,
// validates its transaction id against the method type, decodes the payload,
// and yields the corresponding `NodeRequest` variant.
impl futures::Stream for NodeRequestStream {
    type Item = Result<NodeRequest, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // End the stream cleanly if shutdown was requested via a control handle.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled NodeRequestStream after completion");
        }
        // Read and decode using the thread-local decode buffers to avoid a
        // fresh allocation per message.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closure terminates the stream without an error.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))));
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                // Dispatch on the method ordinal.
                std::task::Poll::Ready(Some(match header.ordinal {
                    // Two-way Node.Sync.
                    0x11ac2555cf575b54 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::Sync {
                            responder: NodeSyncResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // One-way Node.Release.
                    0x6a5cae7d6d6e04c6 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::Release { control_handle })
                    }
                    // One-way Node.SetName.
                    0xb41f1624f48c1e9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeSetNameRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::SetName { payload: req, control_handle })
                    }
                    // One-way Node.SetDebugClientInfo.
                    0x5cde8914608d99b1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeSetDebugClientInfoRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::SetDebugClientInfo { payload: req, control_handle })
                    }
                    // One-way Node.SetDebugTimeoutLogDeadline.
                    0x716b0af13d5c0806 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeSetDebugTimeoutLogDeadlineRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::SetDebugTimeoutLogDeadline { payload: req, control_handle })
                    }
                    // One-way Node.SetVerboseLogging.
                    0x5209c77415b4dfad => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::SetVerboseLogging { control_handle })
                    }
                    // Two-way Node.GetNodeRef.
                    0x5b3d0e51614df053 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::GetNodeRef {
                            responder: NodeGetNodeRefResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Two-way Node.IsAlternateFor.
                    0x3a58e00157e0825 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            NodeIsAlternateForRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::IsAlternateFor {
                            payload: req,
                            responder: NodeIsAlternateForResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Two-way Node.GetBufferCollectionId.
                    0x77d19a494b78ba8c => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::GetBufferCollectionId {
                            responder: NodeGetBufferCollectionIdResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // One-way Node.SetWeak.
                    0x22dd3ea514eeffe1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::SetWeak { control_handle })
                    }
                    // One-way Node.SetWeakOk.
                    0x38a44fc4d7724be9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeSetWeakOkRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::SetWeakOk { payload: req, control_handle })
                    }
                    // One-way Node.AttachNodeTracking.
                    0x3f22f2a293d3cdac => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeAttachNodeTrackingRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::AttachNodeTracking { payload: req, control_handle })
                    }
                    // Unknown flexible one-way method (tx_id == 0): surface as
                    // `_UnknownMethod` without replying.
                    _ if header.tx_id == 0
                        && header
                            .dynamic_flags()
                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        Ok(NodeRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: NodeControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::OneWay,
                        })
                    }
                    // Unknown flexible two-way method: reply with
                    // `FrameworkErr::UnknownMethod`, then surface it.
                    _ if header
                        .dynamic_flags()
                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        this.inner.send_framework_err(
                            fidl::encoding::FrameworkErr::UnknownMethod,
                            header.tx_id,
                            header.ordinal,
                            header.dynamic_flags(),
                            (bytes, handles),
                        )?;
                        Ok(NodeRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: NodeControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::TwoWay,
                        })
                    }
                    // Unknown strict ordinal: protocol error.
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name: <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
13900
13901/// This protocol is the parent protocol for all nodes in the tree established
13902/// by [`fuchsia.sysmem2/BufferCollectionToken`] creation and
13903/// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] creation, including
13904/// [`fuchsia.sysmem2/BufferCollectionToken`](s) which have since been converted
13905/// to a [`fuchsia.sysmem2/BufferCollection`] channel.
13906///
13907/// Epitaphs are not used in this protocol.
13908#[derive(Debug)]
13909pub enum NodeRequest {
13910 /// Ensure that previous messages have been received server side. This is
13911 /// particularly useful after previous messages that created new tokens,
13912 /// because a token must be known to the sysmem server before sending the
13913 /// token to another participant.
13914 ///
13915 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
13916 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
13917 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
13918 /// to mitigate the possibility of a hostile/fake
13919 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
13920 /// Another way is to pass the token to
13921 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
13922 /// the token as part of exchanging it for a
13923 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
13924 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
13925 /// of stalling.
13926 ///
13927 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
13928 /// and then starting and completing a `Sync`, it's then safe to send the
13929 /// `BufferCollectionToken` client ends to other participants knowing the
13930 /// server will recognize the tokens when they're sent by the other
13931 /// participants to sysmem in a
13932 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
13933 /// efficient way to create tokens while avoiding unnecessary round trips.
13934 ///
13935 /// Other options include waiting for each
13936 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
13937 /// individually (using separate call to `Sync` after each), or calling
13938 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
13939 /// converted to a `BufferCollection` via
13940 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
13941 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
13942 /// the sync step and can create multiple tokens at once.
13943 Sync { responder: NodeSyncResponder },
13944 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
13945 ///
13946 /// Normally a participant will convert a `BufferCollectionToken` into a
13947 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
13948 /// `Release` via the token (and then close the channel immediately or
13949 /// shortly later in response to server closing the server end), which
13950 /// avoids causing buffer collection failure. Without a prior `Release`,
13951 /// closing the `BufferCollectionToken` client end will cause buffer
13952 /// collection failure.
13953 ///
13954 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
13955 ///
13956 /// By default the server handles unexpected closure of a
13957 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
13958 /// first) by failing the buffer collection. Partly this is to expedite
13959 /// closing VMO handles to reclaim memory when any participant fails. If a
13960 /// participant would like to cleanly close a `BufferCollection` without
13961 /// causing buffer collection failure, the participant can send `Release`
13962 /// before closing the `BufferCollection` client end. The `Release` can
13963 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
13964 /// buffer collection won't require constraints from this node in order to
13965 /// allocate. If after `SetConstraints`, the constraints are retained and
13966 /// aggregated, despite the lack of `BufferCollection` connection at the
13967 /// time of constraints aggregation.
13968 ///
13969 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
13970 ///
13971 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
13972 /// end (without `Release` first) will trigger failure of the buffer
13973 /// collection. To close a `BufferCollectionTokenGroup` channel without
13974 /// failing the buffer collection, ensure that AllChildrenPresent() has been
13975 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
13976 /// client end.
13977 ///
13978 /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
13980 /// buffer collection will fail (triggered by reception of `Release` without
13981 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
13982 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
13983 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
13984 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
13985 /// close requires `AllChildrenPresent` (if not already sent), then
13986 /// `Release`, then close client end.
13987 ///
13988 /// If `Release` occurs after `AllChildrenPresent`, the children and all
13989 /// their constraints remain intact (just as they would if the
13990 /// `BufferCollectionTokenGroup` channel had remained open), and the client
13991 /// end close doesn't trigger buffer collection failure.
13992 ///
13993 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
13994 ///
13995 /// For brevity, the per-channel-protocol paragraphs above ignore the
13996 /// separate failure domain created by
13997 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
13998 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
13999 /// unexpectedly closes (without `Release` first) and that client end is
14000 /// under a failure domain, instead of failing the whole buffer collection,
14001 /// the failure domain is failed, but the buffer collection itself is
14002 /// isolated from failure of the failure domain. Such failure domains can be
14003 /// nested, in which case only the inner-most failure domain in which the
14004 /// `Node` resides fails.
14005 Release { control_handle: NodeControlHandle },
14006 /// Set a name for VMOs in this buffer collection.
14007 ///
14008 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
14009 /// will be truncated to fit. The name of the vmo will be suffixed with the
14010 /// buffer index within the collection (if the suffix fits within
14011 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
14012 /// listed in the inspect data.
14013 ///
14014 /// The name only affects VMOs allocated after the name is set; this call
14015 /// does not rename existing VMOs. If multiple clients set different names
14016 /// then the larger priority value will win. Setting a new name with the
14017 /// same priority as a prior name doesn't change the name.
14018 ///
14019 /// All table fields are currently required.
14020 ///
14021 /// + request `priority` The name is only set if this is the first `SetName`
14022 /// or if `priority` is greater than any previous `priority` value in
14023 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
14024 /// + request `name` The name for VMOs created under this buffer collection.
14025 SetName { payload: NodeSetNameRequest, control_handle: NodeControlHandle },
14026 /// Set information about the current client that can be used by sysmem to
14027 /// help diagnose leaking memory and allocation stalls waiting for a
14028 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
14029 ///
14030 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
14032 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
14033 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
14034 ///
14035 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
14036 /// `Allocator` is the most efficient way to ensure that all
14037 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
14038 /// set, and is also more efficient than separately sending the same debug
14039 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
14040 /// created [`fuchsia.sysmem2/Node`].
14041 ///
14042 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
14043 /// indicate which client is closing their channel first, leading to subtree
14044 /// failure (which can be normal if the purpose of the subtree is over, but
14045 /// if happening earlier than expected, the client-channel-specific name can
14046 /// help diagnose where the failure is first coming from, from sysmem's
14047 /// point of view).
14048 ///
14049 /// All table fields are currently required.
14050 ///
14051 /// + request `name` This can be an arbitrary string, but the current
14052 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
14053 /// + request `id` This can be an arbitrary id, but the current process ID
14054 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
14055 SetDebugClientInfo { payload: NodeSetDebugClientInfoRequest, control_handle: NodeControlHandle },
14056 /// Sysmem logs a warning if sysmem hasn't seen
14057 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
14058 /// within 5 seconds after creation of a new collection.
14059 ///
14060 /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
14062 /// take effect.
14063 ///
14064 /// In most cases the default works well.
14065 ///
14066 /// All table fields are currently required.
14067 ///
14068 /// + request `deadline` The time at which sysmem will start trying to log
14069 /// the warning, unless all constraints are with sysmem by then.
14070 SetDebugTimeoutLogDeadline {
14071 payload: NodeSetDebugTimeoutLogDeadlineRequest,
14072 control_handle: NodeControlHandle,
14073 },
14074 /// This enables verbose logging for the buffer collection.
14075 ///
14076 /// Verbose logging includes constraints set via
14077 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
14078 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
14079 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
14080 /// the tree of `Node`(s).
14081 ///
14082 /// Normally sysmem prints only a single line complaint when aggregation
14083 /// fails, with just the specific detailed reason that aggregation failed,
14084 /// with little surrounding context. While this is often enough to diagnose
14085 /// a problem if only a small change was made and everything was working
14086 /// before the small change, it's often not particularly helpful for getting
14087 /// a new buffer collection to work for the first time. Especially with
14088 /// more complex trees of nodes, involving things like
14089 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
14090 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
14091 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
14092 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
14093 /// looks like and why it's failing a logical allocation, or why a tree or
14094 /// subtree is failing sooner than expected.
14095 ///
14096 /// The intent of the extra logging is to be acceptable from a performance
14097 /// point of view, under the assumption that verbose logging is only enabled
14098 /// on a low number of buffer collections. If we're not tracking down a bug,
14099 /// we shouldn't send this message.
14100 SetVerboseLogging { control_handle: NodeControlHandle },
14101 /// This gets a handle that can be used as a parameter to
14102 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
14103 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
14104 /// client obtained this handle from this `Node`.
14105 ///
14106 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
14107 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
14108 /// despite the two calls typically being on different channels.
14109 ///
14110 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
14111 ///
14112 /// All table fields are currently required.
14113 ///
14114 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
14115 /// different `Node` channel, to prove that the client obtained the handle
14116 /// from this `Node`.
14117 GetNodeRef { responder: NodeGetNodeRefResponder },
14118 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
14119 /// rooted at a different child token of a common parent
14120 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
14121 /// passed-in `node_ref`.
14122 ///
14123 /// This call is for assisting with admission control de-duplication, and
14124 /// with debugging.
14125 ///
14126 /// The `node_ref` must be obtained using
14127 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
14128 ///
14129 /// The `node_ref` can be a duplicated handle; it's not necessary to call
14130 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
14131 ///
14132 /// If a calling token may not actually be a valid token at all due to a
14133 /// potentially hostile/untrusted provider of the token, call
14134 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
14135 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
14136 /// never responds due to a calling token not being a real token (not really
14137 /// talking to sysmem). Another option is to call
14138 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
14139 /// which also validates the token along with converting it to a
14140 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
14141 ///
14142 /// All table fields are currently required.
14143 ///
14144 /// - response `is_alternate`
14145 /// - true: The first parent node in common between the calling node and
14146 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
14147 /// that the calling `Node` and the `node_ref` `Node` will not have both
14148 /// their constraints apply - rather sysmem will choose one or the other
14149 /// of the constraints - never both. This is because only one child of
14150 /// a `BufferCollectionTokenGroup` is selected during logical
14151 /// allocation, with only that one child's subtree contributing to
14152 /// constraints aggregation.
14153 /// - false: The first parent node in common between the calling `Node`
14154 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
14155 /// Currently, this means the first parent node in common is a
14156 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
14157 /// `Release`ed). This means that the calling `Node` and the `node_ref`
14158 /// `Node` may have both their constraints apply during constraints
14159 /// aggregation of the logical allocation, if both `Node`(s) are
14160 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
14161 /// this case, there is no `BufferCollectionTokenGroup` that will
14162 /// directly prevent the two `Node`(s) from both being selected and
14163 /// their constraints both aggregated, but even when false, one or both
14164 /// `Node`(s) may still be eliminated from consideration if one or both
14165 /// `Node`(s) has a direct or indirect parent
14166 /// `BufferCollectionTokenGroup` which selects a child subtree other
14167 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
14168 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
14169 /// associated with the same buffer collection as the calling `Node`.
14170 /// Another reason for this error is if the `node_ref` is an
14171 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
14172 /// a real `node_ref` obtained from `GetNodeRef`.
14173 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    /// `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
14175 /// the needed rights expected on a real `node_ref`.
14176 /// * No other failing status codes are returned by this call. However,
14177 /// sysmem may add additional codes in future, so the client should have
14178 /// sensible default handling for any failing status code.
14179 IsAlternateFor { payload: NodeIsAlternateForRequest, responder: NodeIsAlternateForResponder },
14180 /// Get the buffer collection ID. This ID is also available from
14181 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
14182 /// within the collection).
14183 ///
14184 /// This call is mainly useful in situations where we can't convey a
14185 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
14186 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
14187 /// handle, which can be joined back up with a `BufferCollection` client end
14188 /// that was created via a different path. Prefer to convey a
14189 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
14190 ///
14191 /// Trusting a `buffer_collection_id` value from a source other than sysmem
14192 /// is analogous to trusting a koid value from a source other than zircon.
14193 /// Both should be avoided unless really necessary, and both require
14194 /// caution. In some situations it may be reasonable to refer to a
14195 /// pre-established `BufferCollection` by `buffer_collection_id` via a
14196 /// protocol for efficiency reasons, but an incoming value purporting to be
14197 /// a `buffer_collection_id` is not sufficient alone to justify granting the
14198 /// sender of the `buffer_collection_id` any capability. The sender must
14199 /// first prove to a receiver that the sender has/had a VMO or has/had a
14200 /// `BufferCollectionToken` to the same collection by sending a handle that
14201 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
14202 /// `buffer_collection_id` value. The receiver should take care to avoid
14203 /// assuming that a sender had a `BufferCollectionToken` in cases where the
14204 /// sender has only proven that the sender had a VMO.
14205 ///
14206 /// - response `buffer_collection_id` This ID is unique per buffer
14207 /// collection per boot. Each buffer is uniquely identified by the
14208 /// `buffer_collection_id` and `buffer_index` together.
14209 GetBufferCollectionId { responder: NodeGetBufferCollectionIdResponder },
14210 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
14211 /// created after this message to weak, which means that a client's `Node`
14212 /// client end (or a child created after this message) is not alone
14213 /// sufficient to keep allocated VMOs alive.
14214 ///
14215 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
14216 /// `close_weak_asap`.
14217 ///
14218 /// This message is only permitted before the `Node` becomes ready for
14219 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
14220 /// * `BufferCollectionToken`: any time
14221 /// * `BufferCollection`: before `SetConstraints`
14222 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
14223 ///
14224 /// Currently, no conversion from strong `Node` to weak `Node` after ready
14225 /// for allocation is provided, but a client can simulate that by creating
14226 /// an additional `Node` before allocation and setting that additional
14227 /// `Node` to weak, and then potentially at some point later sending
14228 /// `Release` and closing the client end of the client's strong `Node`, but
14229 /// keeping the client's weak `Node`.
14230 ///
14231 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
14232 /// collection failure (all `Node` client end(s) will see
14233 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
14234 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
14235 /// this situation until all `Node`(s) are ready for allocation. For initial
14236 /// allocation to succeed, at least one strong `Node` is required to exist
14237 /// at allocation time, but after that client receives VMO handles, that
14238 /// client can `BufferCollection.Release` and close the client end without
14239 /// causing this type of failure.
14240 ///
14241 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
14242 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
14243 /// separately as appropriate.
14244 SetWeak { control_handle: NodeControlHandle },
14245 /// This indicates to sysmem that the client is prepared to pay attention to
14246 /// `close_weak_asap`.
14247 ///
14248 /// If sent, this message must be before
14249 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
14250 ///
14251 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
14252 /// send this message before `WaitForAllBuffersAllocated`, or a parent
14253 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
14254 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
14255 /// trigger buffer collection failure.
14256 ///
14257 /// This message is necessary because weak sysmem VMOs have not always been
14258 /// a thing, so older clients are not aware of the need to pay attention to
14259 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
14260 /// sysmem weak VMO handles asap. By having this message and requiring
14261 /// participants to indicate their acceptance of this aspect of the overall
14262 /// protocol, we avoid situations where an older client is delivered a weak
14263 /// VMO without any way for sysmem to get that VMO to close quickly later
14264 /// (and on a per-buffer basis).
14265 ///
14266 /// A participant that doesn't handle `close_weak_asap` and also doesn't
14267 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
14268 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
14269 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
14270 /// same participant has a child/delegate which does retrieve VMOs, that
14271 /// child/delegate will need to send `SetWeakOk` before
14272 /// `WaitForAllBuffersAllocated`.
14273 ///
14274 /// + request `for_child_nodes_also` If present and true, this means direct
14275 /// child nodes of this node created after this message plus all
14276 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
14277 /// those nodes. Any child node of this node that was created before this
14278 /// message is not included. This setting is "sticky" in the sense that a
14279 /// subsequent `SetWeakOk` without this bool set to true does not reset
14280 /// the server-side bool. If this creates a problem for a participant, a
14281 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
14282 /// tokens instead, as appropriate. A participant should only set
14283 /// `for_child_nodes_also` true if the participant can really promise to
14284 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
14285 /// weak VMO handles held by participants holding the corresponding child
14286 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
14287 /// which are using sysmem(1) can be weak, despite the clients of those
14288 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
14289 /// direct way to find out about `close_weak_asap`. This only applies to
14290 /// descendents of this `Node` which are using sysmem(1), not to this
14291 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
14292 /// token, which will fail allocation unless an ancestor of this `Node`
14293 /// specified `for_child_nodes_also` true.
14294 SetWeakOk { payload: NodeSetWeakOkRequest, control_handle: NodeControlHandle },
    /// The server_end will be closed after this `Node` and any child nodes have
    /// released their buffer counts, making those counts available for
14297 /// reservation by a different `Node` via
14298 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
14299 ///
14300 /// The `Node` buffer counts may not be released until the entire tree of
14301 /// `Node`(s) is closed or failed, because
14302 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
14303 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
14304 /// `Node` buffer counts remain reserved until the orphaned node is later
14305 /// cleaned up.
14306 ///
14307 /// If the `Node` exceeds a fairly large number of attached eventpair server
14308 /// ends, a log message will indicate this and the `Node` (and the
14309 /// appropriate) sub-tree will fail.
14310 ///
14311 /// The `server_end` will remain open when
14312 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
14313 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
14314 /// [`fuchsia.sysmem2/BufferCollection`].
14315 ///
14316 /// This message can also be used with a
14317 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
14318 AttachNodeTracking { payload: NodeAttachNodeTrackingRequest, control_handle: NodeControlHandle },
14319 /// An interaction was received which does not match any known method.
14320 #[non_exhaustive]
14321 _UnknownMethod {
14322 /// Ordinal of the method that was called.
14323 ordinal: u64,
14324 control_handle: NodeControlHandle,
14325 method_type: fidl::MethodType,
14326 },
14327}
14328
14329impl NodeRequest {
14330 #[allow(irrefutable_let_patterns)]
14331 pub fn into_sync(self) -> Option<(NodeSyncResponder)> {
14332 if let NodeRequest::Sync { responder } = self { Some((responder)) } else { None }
14333 }
14334
14335 #[allow(irrefutable_let_patterns)]
14336 pub fn into_release(self) -> Option<(NodeControlHandle)> {
14337 if let NodeRequest::Release { control_handle } = self {
14338 Some((control_handle))
14339 } else {
14340 None
14341 }
14342 }
14343
14344 #[allow(irrefutable_let_patterns)]
14345 pub fn into_set_name(self) -> Option<(NodeSetNameRequest, NodeControlHandle)> {
14346 if let NodeRequest::SetName { payload, control_handle } = self {
14347 Some((payload, control_handle))
14348 } else {
14349 None
14350 }
14351 }
14352
14353 #[allow(irrefutable_let_patterns)]
14354 pub fn into_set_debug_client_info(
14355 self,
14356 ) -> Option<(NodeSetDebugClientInfoRequest, NodeControlHandle)> {
14357 if let NodeRequest::SetDebugClientInfo { payload, control_handle } = self {
14358 Some((payload, control_handle))
14359 } else {
14360 None
14361 }
14362 }
14363
14364 #[allow(irrefutable_let_patterns)]
14365 pub fn into_set_debug_timeout_log_deadline(
14366 self,
14367 ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, NodeControlHandle)> {
14368 if let NodeRequest::SetDebugTimeoutLogDeadline { payload, control_handle } = self {
14369 Some((payload, control_handle))
14370 } else {
14371 None
14372 }
14373 }
14374
14375 #[allow(irrefutable_let_patterns)]
14376 pub fn into_set_verbose_logging(self) -> Option<(NodeControlHandle)> {
14377 if let NodeRequest::SetVerboseLogging { control_handle } = self {
14378 Some((control_handle))
14379 } else {
14380 None
14381 }
14382 }
14383
14384 #[allow(irrefutable_let_patterns)]
14385 pub fn into_get_node_ref(self) -> Option<(NodeGetNodeRefResponder)> {
14386 if let NodeRequest::GetNodeRef { responder } = self { Some((responder)) } else { None }
14387 }
14388
14389 #[allow(irrefutable_let_patterns)]
14390 pub fn into_is_alternate_for(
14391 self,
14392 ) -> Option<(NodeIsAlternateForRequest, NodeIsAlternateForResponder)> {
14393 if let NodeRequest::IsAlternateFor { payload, responder } = self {
14394 Some((payload, responder))
14395 } else {
14396 None
14397 }
14398 }
14399
14400 #[allow(irrefutable_let_patterns)]
14401 pub fn into_get_buffer_collection_id(self) -> Option<(NodeGetBufferCollectionIdResponder)> {
14402 if let NodeRequest::GetBufferCollectionId { responder } = self {
14403 Some((responder))
14404 } else {
14405 None
14406 }
14407 }
14408
14409 #[allow(irrefutable_let_patterns)]
14410 pub fn into_set_weak(self) -> Option<(NodeControlHandle)> {
14411 if let NodeRequest::SetWeak { control_handle } = self {
14412 Some((control_handle))
14413 } else {
14414 None
14415 }
14416 }
14417
14418 #[allow(irrefutable_let_patterns)]
14419 pub fn into_set_weak_ok(self) -> Option<(NodeSetWeakOkRequest, NodeControlHandle)> {
14420 if let NodeRequest::SetWeakOk { payload, control_handle } = self {
14421 Some((payload, control_handle))
14422 } else {
14423 None
14424 }
14425 }
14426
14427 #[allow(irrefutable_let_patterns)]
14428 pub fn into_attach_node_tracking(
14429 self,
14430 ) -> Option<(NodeAttachNodeTrackingRequest, NodeControlHandle)> {
14431 if let NodeRequest::AttachNodeTracking { payload, control_handle } = self {
14432 Some((payload, control_handle))
14433 } else {
14434 None
14435 }
14436 }
14437
14438 /// Name of the method defined in FIDL
14439 pub fn method_name(&self) -> &'static str {
14440 match *self {
14441 NodeRequest::Sync { .. } => "sync",
14442 NodeRequest::Release { .. } => "release",
14443 NodeRequest::SetName { .. } => "set_name",
14444 NodeRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
14445 NodeRequest::SetDebugTimeoutLogDeadline { .. } => "set_debug_timeout_log_deadline",
14446 NodeRequest::SetVerboseLogging { .. } => "set_verbose_logging",
14447 NodeRequest::GetNodeRef { .. } => "get_node_ref",
14448 NodeRequest::IsAlternateFor { .. } => "is_alternate_for",
14449 NodeRequest::GetBufferCollectionId { .. } => "get_buffer_collection_id",
14450 NodeRequest::SetWeak { .. } => "set_weak",
14451 NodeRequest::SetWeakOk { .. } => "set_weak_ok",
14452 NodeRequest::AttachNodeTracking { .. } => "attach_node_tracking",
14453 NodeRequest::_UnknownMethod { method_type: fidl::MethodType::OneWay, .. } => {
14454 "unknown one-way method"
14455 }
14456 NodeRequest::_UnknownMethod { method_type: fidl::MethodType::TwoWay, .. } => {
14457 "unknown two-way method"
14458 }
14459 }
14460 }
14461}
14462
/// Server-side control handle for a `Node` connection.
///
/// Clones share the same underlying connection state (`Arc`), so `shutdown`
/// through any clone affects the whole connection.
#[derive(Debug, Clone)]
pub struct NodeControlHandle {
    // Shared connection state; `Arc` keeps `Clone` cheap.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
14467
/// All operations delegate to the shared `ServeInner` connection state.
impl fidl::endpoints::ControlHandle for NodeControlHandle {
    // Closes the server end of the channel.
    fn shutdown(&self) {
        self.inner.shutdown()
    }

    // Closes the channel after sending `status` to the client as an epitaph.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Raises/clears zircon signals on the peer endpoint; only available on a
    // real Fuchsia target, hence the cfg gate.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
14494
// No Node-specific methods beyond the `ControlHandle` trait implementation.
impl NodeControlHandle {}
14496
/// Responder for the `Node.Sync` two-way method.
///
/// Dropping a responder without sending a response shuts down the channel;
/// call `drop_without_shutdown` to opt out of that behavior.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeSyncResponder {
    // Wrapped in `ManuallyDrop` so `drop_without_shutdown` can release it
    // without running this type's `Drop`.
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
14503
/// Sets the channel to be shutdown (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: the `ManuallyDrop` field is dropped exactly once here and is
        // never accessed again.
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14514
impl fidl::endpoints::Responder for NodeSyncResponder {
    type ControlHandle = NodeControlHandle;

    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    // Consumes the responder without sending a response and without the
    // drop-time channel shutdown.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14529
14530impl NodeSyncResponder {
14531 /// Sends a response to the FIDL transaction.
14532 ///
14533 /// Sets the channel to shutdown if an error occurs.
14534 pub fn send(self) -> Result<(), fidl::Error> {
14535 let _result = self.send_raw();
14536 if _result.is_err() {
14537 self.control_handle.shutdown();
14538 }
14539 self.drop_without_shutdown();
14540 _result
14541 }
14542
14543 /// Similar to "send" but does not shutdown the channel if an error occurs.
14544 pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
14545 let _result = self.send_raw();
14546 self.drop_without_shutdown();
14547 _result
14548 }
14549
14550 fn send_raw(&self) -> Result<(), fidl::Error> {
14551 self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
14552 fidl::encoding::Flexible::new(()),
14553 self.tx_id,
14554 0x11ac2555cf575b54,
14555 fidl::encoding::DynamicFlags::FLEXIBLE,
14556 )
14557 }
14558}
14559
/// Responder for the `Node.GetNodeRef` two-way method.
///
/// Dropping a responder without sending a response shuts down the channel;
/// call `drop_without_shutdown` to opt out of that behavior.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeGetNodeRefResponder {
    // Wrapped in `ManuallyDrop` so `drop_without_shutdown` can release it
    // without running this type's `Drop`.
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
14566
/// Sets the channel to be shutdown (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeGetNodeRefResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: the `ManuallyDrop` field is dropped exactly once here and is
        // never accessed again.
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14577
impl fidl::endpoints::Responder for NodeGetNodeRefResponder {
    type ControlHandle = NodeControlHandle;

    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    // Consumes the responder without sending a response and without the
    // drop-time channel shutdown.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14592
14593impl NodeGetNodeRefResponder {
14594 /// Sends a response to the FIDL transaction.
14595 ///
14596 /// Sets the channel to shutdown if an error occurs.
14597 pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
14598 let _result = self.send_raw(payload);
14599 if _result.is_err() {
14600 self.control_handle.shutdown();
14601 }
14602 self.drop_without_shutdown();
14603 _result
14604 }
14605
14606 /// Similar to "send" but does not shutdown the channel if an error occurs.
14607 pub fn send_no_shutdown_on_err(
14608 self,
14609 mut payload: NodeGetNodeRefResponse,
14610 ) -> Result<(), fidl::Error> {
14611 let _result = self.send_raw(payload);
14612 self.drop_without_shutdown();
14613 _result
14614 }
14615
14616 fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
14617 self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
14618 fidl::encoding::Flexible::new(&mut payload),
14619 self.tx_id,
14620 0x5b3d0e51614df053,
14621 fidl::encoding::DynamicFlags::FLEXIBLE,
14622 )
14623 }
14624}
14625
/// Responder for the `Node.IsAlternateFor` two-way method.
///
/// Dropping a responder without sending a response shuts down the channel;
/// call `drop_without_shutdown` to opt out of that behavior.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeIsAlternateForResponder {
    // Wrapped in `ManuallyDrop` so `drop_without_shutdown` can release it
    // without running this type's `Drop`.
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
14632
/// Sets the channel to be shutdown (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeIsAlternateForResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again. The only other release
        // path (`drop_without_shutdown`) forgets `self` before Drop can run.
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14643
impl fidl::endpoints::Responder for NodeIsAlternateForResponder {
    type ControlHandle = NodeControlHandle;

    // Borrow the control handle for the channel this responder replies on.
    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    // Consume the responder without triggering the shutdown performed by the
    // Drop impl above.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14658
14659impl NodeIsAlternateForResponder {
14660 /// Sends a response to the FIDL transaction.
14661 ///
14662 /// Sets the channel to shutdown if an error occurs.
14663 pub fn send(
14664 self,
14665 mut result: Result<&NodeIsAlternateForResponse, Error>,
14666 ) -> Result<(), fidl::Error> {
14667 let _result = self.send_raw(result);
14668 if _result.is_err() {
14669 self.control_handle.shutdown();
14670 }
14671 self.drop_without_shutdown();
14672 _result
14673 }
14674
14675 /// Similar to "send" but does not shutdown the channel if an error occurs.
14676 pub fn send_no_shutdown_on_err(
14677 self,
14678 mut result: Result<&NodeIsAlternateForResponse, Error>,
14679 ) -> Result<(), fidl::Error> {
14680 let _result = self.send_raw(result);
14681 self.drop_without_shutdown();
14682 _result
14683 }
14684
14685 fn send_raw(
14686 &self,
14687 mut result: Result<&NodeIsAlternateForResponse, Error>,
14688 ) -> Result<(), fidl::Error> {
14689 self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
14690 NodeIsAlternateForResponse,
14691 Error,
14692 >>(
14693 fidl::encoding::FlexibleResult::new(result),
14694 self.tx_id,
14695 0x3a58e00157e0825,
14696 fidl::encoding::DynamicFlags::FLEXIBLE,
14697 )
14698 }
14699}
14700
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeGetBufferCollectionIdResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle without
    // running the shutdown-on-drop behavior of the Drop impl below.
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    // Transaction id echoed back in the response message header.
    tx_id: u32,
}
14707
/// Sets the channel to be shutdown (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeGetBufferCollectionIdResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again. The only other release
        // path (`drop_without_shutdown`) forgets `self` before Drop can run.
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14718
impl fidl::endpoints::Responder for NodeGetBufferCollectionIdResponder {
    type ControlHandle = NodeControlHandle;

    // Borrow the control handle for the channel this responder replies on.
    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    // Consume the responder without triggering the shutdown performed by the
    // Drop impl above.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14733
14734impl NodeGetBufferCollectionIdResponder {
14735 /// Sends a response to the FIDL transaction.
14736 ///
14737 /// Sets the channel to shutdown if an error occurs.
14738 pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
14739 let _result = self.send_raw(payload);
14740 if _result.is_err() {
14741 self.control_handle.shutdown();
14742 }
14743 self.drop_without_shutdown();
14744 _result
14745 }
14746
14747 /// Similar to "send" but does not shutdown the channel if an error occurs.
14748 pub fn send_no_shutdown_on_err(
14749 self,
14750 mut payload: &NodeGetBufferCollectionIdResponse,
14751 ) -> Result<(), fidl::Error> {
14752 let _result = self.send_raw(payload);
14753 self.drop_without_shutdown();
14754 _result
14755 }
14756
14757 fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
14758 self.control_handle
14759 .inner
14760 .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
14761 fidl::encoding::Flexible::new(payload),
14762 self.tx_id,
14763 0x77d19a494b78ba8c,
14764 fidl::encoding::DynamicFlags::FLEXIBLE,
14765 )
14766 }
14767}
14768
/// Marker type identifying the fuchsia.sysmem2 `SecureMem` protocol.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct SecureMemMarker;
14771
impl fidl::endpoints::ProtocolMarker for SecureMemMarker {
    type Proxy = SecureMemProxy;
    type RequestStream = SecureMemRequestStream;
    // Synchronous (blocking) proxies only exist on Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = SecureMemSynchronousProxy;

    // NOTE(review): "(anonymous)" presumably indicates the protocol has no
    // discoverable name in the FIDL library — confirm against the .fidl source.
    const DEBUG_NAME: &'static str = "(anonymous) SecureMem";
}
/// Result of `SecureMem.GetPhysicalSecureHeaps`.
pub type SecureMemGetPhysicalSecureHeapsResult =
    Result<SecureMemGetPhysicalSecureHeapsResponse, Error>;
/// Result of `SecureMem.GetDynamicSecureHeaps`.
pub type SecureMemGetDynamicSecureHeapsResult =
    Result<SecureMemGetDynamicSecureHeapsResponse, Error>;
/// Result of `SecureMem.GetPhysicalSecureHeapProperties`.
pub type SecureMemGetPhysicalSecureHeapPropertiesResult =
    Result<SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>;
/// Result of `SecureMem.AddSecureHeapPhysicalRange` (empty success payload).
pub type SecureMemAddSecureHeapPhysicalRangeResult = Result<(), Error>;
/// Result of `SecureMem.DeleteSecureHeapPhysicalRange` (empty success payload).
pub type SecureMemDeleteSecureHeapPhysicalRangeResult = Result<(), Error>;
/// Result of `SecureMem.ModifySecureHeapPhysicalRange` (empty success payload).
pub type SecureMemModifySecureHeapPhysicalRangeResult = Result<(), Error>;
/// Result of `SecureMem.ZeroSubRange` (empty success payload).
pub type SecureMemZeroSubRangeResult = Result<(), Error>;
14790
/// Client-side interface for the `SecureMem` protocol, implemented by both
/// the async proxy and test fakes. Each two-way method has an associated
/// future type so implementations can choose their own future.
pub trait SecureMemProxyInterface: Send + Sync {
    type GetPhysicalSecureHeapsResponseFut: std::future::Future<Output = Result<SecureMemGetPhysicalSecureHeapsResult, fidl::Error>>
        + Send;
    /// See [`SecureMemProxy::get_physical_secure_heaps`].
    fn r#get_physical_secure_heaps(&self) -> Self::GetPhysicalSecureHeapsResponseFut;
    type GetDynamicSecureHeapsResponseFut: std::future::Future<Output = Result<SecureMemGetDynamicSecureHeapsResult, fidl::Error>>
        + Send;
    /// See [`SecureMemProxy::get_dynamic_secure_heaps`].
    fn r#get_dynamic_secure_heaps(&self) -> Self::GetDynamicSecureHeapsResponseFut;
    type GetPhysicalSecureHeapPropertiesResponseFut: std::future::Future<
            Output = Result<SecureMemGetPhysicalSecureHeapPropertiesResult, fidl::Error>,
        > + Send;
    /// See [`SecureMemProxy::get_physical_secure_heap_properties`].
    fn r#get_physical_secure_heap_properties(
        &self,
        payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
    ) -> Self::GetPhysicalSecureHeapPropertiesResponseFut;
    type AddSecureHeapPhysicalRangeResponseFut: std::future::Future<Output = Result<SecureMemAddSecureHeapPhysicalRangeResult, fidl::Error>>
        + Send;
    /// See [`SecureMemProxy::add_secure_heap_physical_range`].
    fn r#add_secure_heap_physical_range(
        &self,
        payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
    ) -> Self::AddSecureHeapPhysicalRangeResponseFut;
    type DeleteSecureHeapPhysicalRangeResponseFut: std::future::Future<
            Output = Result<SecureMemDeleteSecureHeapPhysicalRangeResult, fidl::Error>,
        > + Send;
    /// See [`SecureMemProxy::delete_secure_heap_physical_range`].
    fn r#delete_secure_heap_physical_range(
        &self,
        payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
    ) -> Self::DeleteSecureHeapPhysicalRangeResponseFut;
    type ModifySecureHeapPhysicalRangeResponseFut: std::future::Future<
            Output = Result<SecureMemModifySecureHeapPhysicalRangeResult, fidl::Error>,
        > + Send;
    /// See [`SecureMemProxy::modify_secure_heap_physical_range`].
    fn r#modify_secure_heap_physical_range(
        &self,
        payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
    ) -> Self::ModifySecureHeapPhysicalRangeResponseFut;
    type ZeroSubRangeResponseFut: std::future::Future<Output = Result<SecureMemZeroSubRangeResult, fidl::Error>>
        + Send;
    /// See [`SecureMemProxy::zero_sub_range`].
    fn r#zero_sub_range(
        &self,
        payload: &SecureMemZeroSubRangeRequest,
    ) -> Self::ZeroSubRangeResponseFut;
}
/// Blocking (synchronous) client for the `SecureMem` protocol; only
/// available on Fuchsia targets.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct SecureMemSynchronousProxy {
    client: fidl::client::sync::Client,
}
14837
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for SecureMemSynchronousProxy {
    type Proxy = SecureMemProxy;
    type Protocol = SecureMemMarker;

    // Wrap a raw channel in a synchronous proxy.
    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    // Recover the underlying channel, consuming the proxy.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    // Borrow the underlying channel without consuming the proxy.
    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
14855
#[cfg(target_os = "fuchsia")]
impl SecureMemSynchronousProxy {
    /// Creates a synchronous proxy that sends `SecureMem` messages over
    /// `channel`, labeling errors with the protocol's debug name.
    pub fn new(channel: fidl::Channel) -> Self {
        let protocol_name = <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
    }

    /// Consumes the proxy, returning the underlying channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemEvent, fidl::Error> {
        SecureMemEvent::decode(self.client.wait_for_event(deadline)?)
    }

    /// Gets the physical address and length of any secure heap whose physical
    /// range is configured via the TEE.
    ///
    /// Presently, these will be fixed physical addresses and lengths, with the
    /// location plumbed via the TEE.
    ///
    /// This is preferred over ['fuchsia.hardware.sysmem.Sysmem/RegisterHeap']
    /// when there isn't any special heap-specific per-VMO setup or teardown
    /// required.
    ///
    /// The physical range must be secured/protected by the TEE before the
    /// securemem driver responds to this request with success.
    ///
    /// Sysmem should only call this once. Returning zero heaps is not a
    /// failure.
    ///
    /// Errors:
    ///  * PROTOCOL_DEVIATION - called more than once.
    ///  * UNSPECIFIED - generic internal error (such as in communication
    ///    with TEE which doesn't generate zx_status_t errors).
    ///  * other errors are allowed; any other errors should be treated the same
    ///    as UNSPECIFIED.
    pub fn r#get_physical_secure_heaps(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemGetPhysicalSecureHeapsResult, fidl::Error> {
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleResultType<SecureMemGetPhysicalSecureHeapsResponse, Error>,
        >(
            (),
            0x38716300592073e3,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("get_physical_secure_heaps")?;
        // `_response` already has the result type; the redundant identity
        // `.map(|x| x)` was removed (clippy::map_identity).
        Ok(_response)
    }

    /// Gets information about any secure heaps whose physical pages are not
    /// configured by the TEE, but by sysmem.
    ///
    /// Sysmem should only call this once. Returning zero heaps is not a
    /// failure.
    ///
    /// Errors:
    ///  * PROTOCOL_DEVIATION - called more than once.
    ///  * UNSPECIFIED - generic internal error (such as in communication
    ///    with TEE which doesn't generate zx_status_t errors).
    ///  * other errors are allowed; any other errors should be treated the same
    ///    as UNSPECIFIED.
    pub fn r#get_dynamic_secure_heaps(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemGetDynamicSecureHeapsResult, fidl::Error> {
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleResultType<SecureMemGetDynamicSecureHeapsResponse, Error>,
        >(
            (),
            0x1190847f99952834,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("get_dynamic_secure_heaps")?;
        Ok(_response)
    }

    /// This request from sysmem to the securemem driver gets the properties of
    /// a protected/secure heap.
    ///
    /// This only handles heaps with a single contiguous physical extent.
    ///
    /// The heap's entire physical range is indicated in case this request needs
    /// some physical space to auto-detect how many ranges are REE-usable. Any
    /// temporary HW protection ranges will be deleted before this request
    /// completes.
    ///
    /// Errors:
    ///  * UNSPECIFIED - generic internal error (such as in communication
    ///    with TEE which doesn't generate zx_status_t errors).
    ///  * other errors are allowed; any other errors should be treated the same
    ///    as UNSPECIFIED.
    pub fn r#get_physical_secure_heap_properties(
        &self,
        mut payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemGetPhysicalSecureHeapPropertiesResult, fidl::Error> {
        let _response = self.client.send_query::<
            SecureMemGetPhysicalSecureHeapPropertiesRequest,
            fidl::encoding::FlexibleResultType<SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
        >(
            payload,
            0xc6f06889009c7bc,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("get_physical_secure_heap_properties")?;
        Ok(_response)
    }

    /// This request from sysmem to the securemem driver conveys a physical
    /// range to add, for a heap whose physical range(s) are set up via
    /// sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure all the covered offsets as protected
    /// before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the protected range was not
    /// created.
    ///
    /// Sysmem must only call this up to once if dynamic_protection_ranges
    /// false.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this multiple
    /// times as long as the current number of ranges never exceeds
    /// max_protected_range_count.
    ///
    /// The caller must not attempt to add a range that matches an
    /// already-existing range. Added ranges can overlap each other as long as
    /// no two ranges match exactly.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called more than once when
    ///     !dynamic_protection_ranges. Adding a heap that would cause overall
    ///     heap count to exceed max_protected_range_count. Unexpected heap, or
    ///     range that doesn't conform to protected_range_granularity. See log.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#add_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemAddSecureHeapPhysicalRangeResult, fidl::Error> {
        let _response = self.client.send_query::<
            SecureMemAddSecureHeapPhysicalRangeRequest,
            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
        >(
            payload,
            0x35f695b9b6c7217a,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("add_secure_heap_physical_range")?;
        Ok(_response)
    }

    /// This request from sysmem to the securemem driver conveys a physical
    /// range to delete, for a heap whose physical range(s) are set up via
    /// sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure all the covered offsets as not
    /// protected before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the protected range was not
    /// deleted.
    ///
    /// Sysmem must not call this if dynamic_protection_ranges false.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
    /// on various ranges that exist at the time of the call.
    ///
    /// If any portion of the range being deleted is not also covered by another
    /// protected range, then any ongoing DMA to any part of the entire range
    /// may be interrupted / may fail, potentially in a way that's disruptive to
    /// the entire system (bus lockup or similar, depending on device details).
    /// Therefore, the caller must ensure that no ongoing DMA is occurring to
    /// any portion of the range being deleted, unless the caller has other
    /// active ranges covering every block of the range being deleted. Ongoing
    /// DMA to/from blocks outside the range being deleted is never impacted by
    /// the deletion.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap, or range that doesn't conform to
    ///     protected_range_granularity.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * NOT_FOUND - the specified range is not found.
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#delete_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemDeleteSecureHeapPhysicalRangeResult, fidl::Error> {
        let _response = self.client.send_query::<
            SecureMemDeleteSecureHeapPhysicalRangeRequest,
            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
        >(
            payload,
            0xeaa58c650264c9e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("delete_secure_heap_physical_range")?;
        Ok(_response)
    }

    /// This request from sysmem to the securemem driver conveys a physical
    /// range to modify and its new base and length, for a heap whose physical
    /// range(s) are set up via sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure the range to cover only the new
    /// offsets before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the range was not changed.
    ///
    /// Sysmem must not call this if dynamic_protection_ranges false. Sysmem
    /// must not call this if !is_mod_protected_range_available.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
    /// on various ranges that exist at the time of the call.
    ///
    /// The range must only be modified at one end or the other, but not both.
    /// If the range is getting shorter, and the un-covered blocks are not
    /// covered by other active ranges, any ongoing DMA to the entire range
    /// that's getting shorter may fail in a way that disrupts the entire system
    /// (bus lockup or similar), so the caller must ensure that no DMA is
    /// ongoing to any portion of a range that is getting shorter, unless the
    /// blocks being un-covered by the modification to this range are all
    /// covered by other active ranges, in which case no disruption to ongoing
    /// DMA will occur.
    ///
    /// If a range is modified to become <= zero length, the range is deleted.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap, or old_range or new_range that doesn't conform to
    ///     protected_range_granularity, or old_range and new_range differ in
    ///     both begin and end (disallowed).
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * NOT_FOUND - the specified range is not found.
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#modify_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemModifySecureHeapPhysicalRangeResult, fidl::Error> {
        let _response = self.client.send_query::<
            SecureMemModifySecureHeapPhysicalRangeRequest,
            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
        >(
            payload,
            0x60b7448aa1187734,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("modify_secure_heap_physical_range")?;
        Ok(_response)
    }

    /// Zero a sub-range of a currently-existing physical range added via
    /// AddSecureHeapPhysicalRange(). The sub-range must be fully covered by
    /// exactly one physical range, and must not overlap with any other
    /// physical range.
    ///
    /// is_covering_range_explicit - When true, the covering range must be one
    ///     of the ranges explicitly created via AddSecureHeapPhysicalRange(),
    ///     possibly modified since. When false, the covering range must not
    ///     be one of the ranges explicitly created via
    ///     AddSecureHeapPhysicalRange(), but the covering range must exist as
    ///     a covering range not created via AddSecureHeapPhysicalRange(). The
    ///     covering range is typically the entire physical range (or a range
    ///     which covers even more) of a heap configured by the TEE and whose
    ///     configuration is conveyed to sysmem via GetPhysicalSecureHeaps().
    ///
    /// Ongoing DMA is not disrupted by this request.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#zero_sub_range(
        &self,
        mut payload: &SecureMemZeroSubRangeRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemZeroSubRangeResult, fidl::Error> {
        let _response = self.client.send_query::<
            SecureMemZeroSubRangeRequest,
            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
        >(
            payload,
            0x5b25b7901a385ce5,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("zero_sub_range")?;
        Ok(_response)
    }
}
15186
#[cfg(target_os = "fuchsia")]
impl From<SecureMemSynchronousProxy> for zx::NullableHandle {
    fn from(value: SecureMemSynchronousProxy) -> Self {
        // Consumes the proxy and converts its underlying channel into a
        // generic (nullable) handle.
        value.into_channel().into()
    }
}
15193
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for SecureMemSynchronousProxy {
    fn from(value: fidl::Channel) -> Self {
        // Equivalent to `SecureMemSynchronousProxy::new`.
        Self::new(value)
    }
}
15200
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for SecureMemSynchronousProxy {
    type Protocol = SecureMemMarker;

    // Build a synchronous proxy from a typed client endpoint.
    fn from_client(value: fidl::endpoints::ClientEnd<SecureMemMarker>) -> Self {
        Self::new(value.into_channel())
    }
}
15209
/// Asynchronous client for the `SecureMem` protocol.
#[derive(Debug, Clone)]
pub struct SecureMemProxy {
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
15214
impl fidl::endpoints::Proxy for SecureMemProxy {
    type Protocol = SecureMemMarker;

    // Wrap an async channel in a proxy.
    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
        Self::new(inner)
    }

    // Attempt to recover the underlying channel; on failure the intact proxy
    // is returned. (Failure conditions are defined by `fidl::client::Client`.)
    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
        self.client.into_channel().map_err(|client| Self { client })
    }

    // Borrow the underlying channel without consuming the proxy.
    fn as_channel(&self) -> &::fidl::AsyncChannel {
        self.client.as_channel()
    }
}
15230
15231impl SecureMemProxy {
15232 /// Create a new Proxy for fuchsia.sysmem2/SecureMem.
15233 pub fn new(channel: ::fidl::AsyncChannel) -> Self {
15234 let protocol_name = <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
15235 Self { client: fidl::client::Client::new(channel, protocol_name) }
15236 }
15237
15238 /// Get a Stream of events from the remote end of the protocol.
15239 ///
15240 /// # Panics
15241 ///
15242 /// Panics if the event stream was already taken.
15243 pub fn take_event_stream(&self) -> SecureMemEventStream {
15244 SecureMemEventStream { event_receiver: self.client.take_event_receiver() }
15245 }
15246
15247 /// Gets the physical address and length of any secure heap whose physical
15248 /// range is configured via the TEE.
15249 ///
15250 /// Presently, these will be fixed physical addresses and lengths, with the
15251 /// location plumbed via the TEE.
15252 ///
15253 /// This is preferred over ['fuchsia.hardware.sysmem.Sysmem/RegisterHeap']
15254 /// when there isn't any special heap-specific per-VMO setup or teardown
15255 /// required.
15256 ///
15257 /// The physical range must be secured/protected by the TEE before the
15258 /// securemem driver responds to this request with success.
15259 ///
15260 /// Sysmem should only call this once. Returning zero heaps is not a
15261 /// failure.
15262 ///
15263 /// Errors:
15264 /// * PROTOCOL_DEVIATION - called more than once.
15265 /// * UNSPECIFIED - generic internal error (such as in communication
15266 /// with TEE which doesn't generate zx_status_t errors).
15267 /// * other errors are allowed; any other errors should be treated the same
15268 /// as UNSPECIFIED.
15269 pub fn r#get_physical_secure_heaps(
15270 &self,
15271 ) -> fidl::client::QueryResponseFut<
15272 SecureMemGetPhysicalSecureHeapsResult,
15273 fidl::encoding::DefaultFuchsiaResourceDialect,
15274 > {
15275 SecureMemProxyInterface::r#get_physical_secure_heaps(self)
15276 }
15277
15278 /// Gets information about any secure heaps whose physical pages are not
15279 /// configured by the TEE, but by sysmem.
15280 ///
15281 /// Sysmem should only call this once. Returning zero heaps is not a
15282 /// failure.
15283 ///
15284 /// Errors:
15285 /// * PROTOCOL_DEVIATION - called more than once.
15286 /// * UNSPECIFIED - generic internal error (such as in communication
15287 /// with TEE which doesn't generate zx_status_t errors).
15288 /// * other errors are allowed; any other errors should be treated the same
15289 /// as UNSPECIFIED.
15290 pub fn r#get_dynamic_secure_heaps(
15291 &self,
15292 ) -> fidl::client::QueryResponseFut<
15293 SecureMemGetDynamicSecureHeapsResult,
15294 fidl::encoding::DefaultFuchsiaResourceDialect,
15295 > {
15296 SecureMemProxyInterface::r#get_dynamic_secure_heaps(self)
15297 }
15298
15299 /// This request from sysmem to the securemem driver gets the properties of
15300 /// a protected/secure heap.
15301 ///
15302 /// This only handles heaps with a single contiguous physical extent.
15303 ///
15304 /// The heap's entire physical range is indicated in case this request needs
15305 /// some physical space to auto-detect how many ranges are REE-usable. Any
15306 /// temporary HW protection ranges will be deleted before this request
15307 /// completes.
15308 ///
15309 /// Errors:
15310 /// * UNSPECIFIED - generic internal error (such as in communication
15311 /// with TEE which doesn't generate zx_status_t errors).
15312 /// * other errors are allowed; any other errors should be treated the same
15313 /// as UNSPECIFIED.
15314 pub fn r#get_physical_secure_heap_properties(
15315 &self,
15316 mut payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
15317 ) -> fidl::client::QueryResponseFut<
15318 SecureMemGetPhysicalSecureHeapPropertiesResult,
15319 fidl::encoding::DefaultFuchsiaResourceDialect,
15320 > {
15321 SecureMemProxyInterface::r#get_physical_secure_heap_properties(self, payload)
15322 }
15323
15324 /// This request from sysmem to the securemem driver conveys a physical
15325 /// range to add, for a heap whose physical range(s) are set up via
15326 /// sysmem.
15327 ///
15328 /// Only sysmem can call this because only sysmem is handed the client end
15329 /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
15330 /// securemem driver is the server end of this protocol.
15331 ///
15332 /// The securemem driver must configure all the covered offsets as protected
15333 /// before responding to this message with success.
15334 ///
15335 /// On failure, the securemem driver must ensure the protected range was not
15336 /// created.
15337 ///
15338 /// Sysmem must only call this up to once if dynamic_protection_ranges
15339 /// false.
15340 ///
15341 /// If dynamic_protection_ranges is true, sysmem can call this multiple
15342 /// times as long as the current number of ranges never exceeds
15343 /// max_protected_range_count.
15344 ///
15345 /// The caller must not attempt to add a range that matches an
15346 /// already-existing range. Added ranges can overlap each other as long as
15347 /// no two ranges match exactly.
15348 ///
15349 /// Errors:
15350 /// * PROTOCOL_DEVIATION - called more than once when
15351 /// !dynamic_protection_ranges. Adding a heap that would cause overall
15352 /// heap count to exceed max_protected_range_count. Unexpected heap, or
15353 /// range that doesn't conform to protected_range_granularity. See log.
15354 /// * UNSPECIFIED - generic internal error (such as in communication
15355 /// with TEE which doesn't generate zx_status_t errors).
15356 /// * other errors are possible, such as from communication failures or
15357 /// server propagation of failures.
15358 pub fn r#add_secure_heap_physical_range(
15359 &self,
15360 mut payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
15361 ) -> fidl::client::QueryResponseFut<
15362 SecureMemAddSecureHeapPhysicalRangeResult,
15363 fidl::encoding::DefaultFuchsiaResourceDialect,
15364 > {
15365 SecureMemProxyInterface::r#add_secure_heap_physical_range(self, payload)
15366 }
15367
15368 /// This request from sysmem to the securemem driver conveys a physical
15369 /// range to delete, for a heap whose physical range(s) are set up via
15370 /// sysmem.
15371 ///
15372 /// Only sysmem can call this because only sysmem is handed the client end
15373 /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
15374 /// securemem driver is the server end of this protocol.
15375 ///
15376 /// The securemem driver must configure all the covered offsets as not
15377 /// protected before responding to this message with success.
15378 ///
15379 /// On failure, the securemem driver must ensure the protected range was not
15380 /// deleted.
15381 ///
15382 /// Sysmem must not call this if dynamic_protection_ranges false.
15383 ///
15384 /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
15385 /// on various ranges that exist at the time of the call.
15386 ///
15387 /// If any portion of the range being deleted is not also covered by another
15388 /// protected range, then any ongoing DMA to any part of the entire range
15389 /// may be interrupted / may fail, potentially in a way that's disruptive to
15390 /// the entire system (bus lockup or similar, depending on device details).
15391 /// Therefore, the caller must ensure that no ongoing DMA is occurring to
15392 /// any portion of the range being deleted, unless the caller has other
15393 /// active ranges covering every block of the range being deleted. Ongoing
15394 /// DMA to/from blocks outside the range being deleted is never impacted by
15395 /// the deletion.
15396 ///
15397 /// Errors:
15398 /// * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
15399 /// Unexpected heap, or range that doesn't conform to
15400 /// protected_range_granularity.
15401 /// * UNSPECIFIED - generic internal error (such as in communication
15402 /// with TEE which doesn't generate zx_status_t errors).
15403 /// * NOT_FOUND - the specified range is not found.
15404 /// * other errors are possible, such as from communication failures or
15405 /// server propagation of failures.
15406 pub fn r#delete_secure_heap_physical_range(
15407 &self,
15408 mut payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
15409 ) -> fidl::client::QueryResponseFut<
15410 SecureMemDeleteSecureHeapPhysicalRangeResult,
15411 fidl::encoding::DefaultFuchsiaResourceDialect,
15412 > {
15413 SecureMemProxyInterface::r#delete_secure_heap_physical_range(self, payload)
15414 }
15415
15416 /// This request from sysmem to the securemem driver conveys a physical
15417 /// range to modify and its new base and length, for a heap whose physical
15418 /// range(s) are set up via sysmem.
15419 ///
15420 /// Only sysmem can call this because only sysmem is handed the client end
15421 /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
15422 /// securemem driver is the server end of this protocol.
15423 ///
15424 /// The securemem driver must configure the range to cover only the new
15425 /// offsets before responding to this message with success.
15426 ///
15427 /// On failure, the securemem driver must ensure the range was not changed.
15428 ///
15429 /// Sysmem must not call this if dynamic_protection_ranges false. Sysmem
15430 /// must not call this if !is_mod_protected_range_available.
15431 ///
15432 /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
15433 /// on various ranges that exist at the time of the call.
15434 ///
15435 /// The range must only be modified at one end or the other, but not both.
15436 /// If the range is getting shorter, and the un-covered blocks are not
15437 /// covered by other active ranges, any ongoing DMA to the entire range
    /// that's getting shorter may fail in a way that disrupts the entire system
15439 /// (bus lockup or similar), so the caller must ensure that no DMA is
15440 /// ongoing to any portion of a range that is getting shorter, unless the
15441 /// blocks being un-covered by the modification to this range are all
15442 /// covered by other active ranges, in which case no disruption to ongoing
15443 /// DMA will occur.
15444 ///
15445 /// If a range is modified to become <= zero length, the range is deleted.
15446 ///
15447 /// Errors:
15448 /// * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
15449 /// Unexpected heap, or old_range or new_range that doesn't conform to
15450 /// protected_range_granularity, or old_range and new_range differ in
15451 /// both begin and end (disallowed).
15452 /// * UNSPECIFIED - generic internal error (such as in communication
15453 /// with TEE which doesn't generate zx_status_t errors).
15454 /// * NOT_FOUND - the specified range is not found.
15455 /// * other errors are possible, such as from communication failures or
15456 /// server propagation of failures.
15457 pub fn r#modify_secure_heap_physical_range(
15458 &self,
15459 mut payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
15460 ) -> fidl::client::QueryResponseFut<
15461 SecureMemModifySecureHeapPhysicalRangeResult,
15462 fidl::encoding::DefaultFuchsiaResourceDialect,
15463 > {
15464 SecureMemProxyInterface::r#modify_secure_heap_physical_range(self, payload)
15465 }
15466
15467 /// Zero a sub-range of a currently-existing physical range added via
15468 /// AddSecureHeapPhysicalRange(). The sub-range must be fully covered by
15469 /// exactly one physical range, and must not overlap with any other
15470 /// physical range.
15471 ///
15472 /// is_covering_range_explicit - When true, the covering range must be one
15473 /// of the ranges explicitly created via AddSecureHeapPhysicalRange(),
15474 /// possibly modified since. When false, the covering range must not
15475 /// be one of the ranges explicitly created via
15476 /// AddSecureHeapPhysicalRange(), but the covering range must exist as
15477 /// a covering range not created via AddSecureHeapPhysicalRange(). The
15478 /// covering range is typically the entire physical range (or a range
15479 /// which covers even more) of a heap configured by the TEE and whose
15480 /// configuration is conveyed to sysmem via GetPhysicalSecureHeaps().
15481 ///
15482 /// Ongoing DMA is not disrupted by this request.
15483 ///
15484 /// Errors:
15485 /// * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
15486 /// Unexpected heap.
15487 /// * UNSPECIFIED - generic internal error (such as in communication
15488 /// with TEE which doesn't generate zx_status_t errors).
15489 /// * other errors are possible, such as from communication failures or
15490 /// server propagation of failures.
15491 pub fn r#zero_sub_range(
15492 &self,
15493 mut payload: &SecureMemZeroSubRangeRequest,
15494 ) -> fidl::client::QueryResponseFut<
15495 SecureMemZeroSubRangeResult,
15496 fidl::encoding::DefaultFuchsiaResourceDialect,
15497 > {
15498 SecureMemProxyInterface::r#zero_sub_range(self, payload)
15499 }
15500}
15501
// Client-side dispatch: each method sends a FLEXIBLE two-way call carrying
// the method's ordinal, then decodes the flexible result envelope back into
// the method's Result type via a local `_decode` helper.
impl SecureMemProxyInterface for SecureMemProxy {
    type GetPhysicalSecureHeapsResponseFut = fidl::client::QueryResponseFut<
        SecureMemGetPhysicalSecureHeapsResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_physical_secure_heaps(&self) -> Self::GetPhysicalSecureHeapsResponseFut {
        // Ordinal 0x38716300592073e3; request carries no payload.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemGetPhysicalSecureHeapsResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<SecureMemGetPhysicalSecureHeapsResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x38716300592073e3,
            >(_buf?)?
            .into_result::<SecureMemMarker>("get_physical_secure_heaps")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            SecureMemGetPhysicalSecureHeapsResult,
        >(
            (),
            0x38716300592073e3,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetDynamicSecureHeapsResponseFut = fidl::client::QueryResponseFut<
        SecureMemGetDynamicSecureHeapsResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_dynamic_secure_heaps(&self) -> Self::GetDynamicSecureHeapsResponseFut {
        // Ordinal 0x1190847f99952834; request carries no payload.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemGetDynamicSecureHeapsResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<SecureMemGetDynamicSecureHeapsResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x1190847f99952834,
            >(_buf?)?
            .into_result::<SecureMemMarker>("get_dynamic_secure_heaps")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            SecureMemGetDynamicSecureHeapsResult,
        >(
            (),
            0x1190847f99952834,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetPhysicalSecureHeapPropertiesResponseFut = fidl::client::QueryResponseFut<
        SecureMemGetPhysicalSecureHeapPropertiesResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_physical_secure_heap_properties(
        &self,
        mut payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
    ) -> Self::GetPhysicalSecureHeapPropertiesResponseFut {
        // Ordinal 0xc6f06889009c7bc; request is encoded from `payload`.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemGetPhysicalSecureHeapPropertiesResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<
                    SecureMemGetPhysicalSecureHeapPropertiesResponse,
                    Error,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0xc6f06889009c7bc,
            >(_buf?)?
            .into_result::<SecureMemMarker>("get_physical_secure_heap_properties")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            SecureMemGetPhysicalSecureHeapPropertiesRequest,
            SecureMemGetPhysicalSecureHeapPropertiesResult,
        >(
            payload,
            0xc6f06889009c7bc,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type AddSecureHeapPhysicalRangeResponseFut = fidl::client::QueryResponseFut<
        SecureMemAddSecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#add_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
    ) -> Self::AddSecureHeapPhysicalRangeResponseFut {
        // Ordinal 0x35f695b9b6c7217a; the success reply carries no data
        // (EmptyStruct).
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemAddSecureHeapPhysicalRangeResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x35f695b9b6c7217a,
            >(_buf?)?
            .into_result::<SecureMemMarker>("add_secure_heap_physical_range")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            SecureMemAddSecureHeapPhysicalRangeRequest,
            SecureMemAddSecureHeapPhysicalRangeResult,
        >(
            payload,
            0x35f695b9b6c7217a,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type DeleteSecureHeapPhysicalRangeResponseFut = fidl::client::QueryResponseFut<
        SecureMemDeleteSecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#delete_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
    ) -> Self::DeleteSecureHeapPhysicalRangeResponseFut {
        // Ordinal 0xeaa58c650264c9e; the success reply carries no data.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemDeleteSecureHeapPhysicalRangeResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0xeaa58c650264c9e,
            >(_buf?)?
            .into_result::<SecureMemMarker>("delete_secure_heap_physical_range")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            SecureMemDeleteSecureHeapPhysicalRangeRequest,
            SecureMemDeleteSecureHeapPhysicalRangeResult,
        >(
            payload,
            0xeaa58c650264c9e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type ModifySecureHeapPhysicalRangeResponseFut = fidl::client::QueryResponseFut<
        SecureMemModifySecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#modify_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
    ) -> Self::ModifySecureHeapPhysicalRangeResponseFut {
        // Ordinal 0x60b7448aa1187734; the success reply carries no data.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemModifySecureHeapPhysicalRangeResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x60b7448aa1187734,
            >(_buf?)?
            .into_result::<SecureMemMarker>("modify_secure_heap_physical_range")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            SecureMemModifySecureHeapPhysicalRangeRequest,
            SecureMemModifySecureHeapPhysicalRangeResult,
        >(
            payload,
            0x60b7448aa1187734,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type ZeroSubRangeResponseFut = fidl::client::QueryResponseFut<
        SecureMemZeroSubRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#zero_sub_range(
        &self,
        mut payload: &SecureMemZeroSubRangeRequest,
    ) -> Self::ZeroSubRangeResponseFut {
        // Ordinal 0x5b25b7901a385ce5; the success reply carries no data.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemZeroSubRangeResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b25b7901a385ce5,
            >(_buf?)?
            .into_result::<SecureMemMarker>("zero_sub_range")?;
            Ok(_response.map(|x| x))
        }
        self.client
            .send_query_and_decode::<SecureMemZeroSubRangeRequest, SecureMemZeroSubRangeResult>(
                payload,
                0x5b25b7901a385ce5,
                fidl::encoding::DynamicFlags::FLEXIBLE,
                _decode,
            )
    }
}
15708
/// A stream of events for the fuchsia.sysmem2/SecureMem protocol.
pub struct SecureMemEventStream {
    // Receives raw event message buffers, which `SecureMemEvent::decode`
    // turns into typed events.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
15712
15713impl std::marker::Unpin for SecureMemEventStream {}
15714
impl futures::stream::FusedStream for SecureMemEventStream {
    // Terminated exactly when the underlying event receiver is terminated.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
15720
15721impl futures::Stream for SecureMemEventStream {
15722 type Item = Result<SecureMemEvent, fidl::Error>;
15723
15724 fn poll_next(
15725 mut self: std::pin::Pin<&mut Self>,
15726 cx: &mut std::task::Context<'_>,
15727 ) -> std::task::Poll<Option<Self::Item>> {
15728 match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
15729 &mut self.event_receiver,
15730 cx
15731 )?) {
15732 Some(buf) => std::task::Poll::Ready(Some(SecureMemEvent::decode(buf))),
15733 None => std::task::Poll::Ready(None),
15734 }
15735 }
15736}
15737
/// An event received on the fuchsia.sysmem2/SecureMem protocol.
#[derive(Debug)]
pub enum SecureMemEvent {
    /// An event with an unrecognized ordinal, surfaced (rather than treated
    /// as an error) because the sender marked the message FLEXIBLE.
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
15746
15747impl SecureMemEvent {
15748 /// Decodes a message buffer as a [`SecureMemEvent`].
15749 fn decode(
15750 mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
15751 ) -> Result<SecureMemEvent, fidl::Error> {
15752 let (bytes, _handles) = buf.split_mut();
15753 let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
15754 debug_assert_eq!(tx_header.tx_id, 0);
15755 match tx_header.ordinal {
15756 _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
15757 Ok(SecureMemEvent::_UnknownEvent { ordinal: tx_header.ordinal })
15758 }
15759 _ => Err(fidl::Error::UnknownOrdinal {
15760 ordinal: tx_header.ordinal,
15761 protocol_name: <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
15762 }),
15763 }
15764 }
15765}
15766
/// A Stream of incoming requests for fuchsia.sysmem2/SecureMem.
pub struct SecureMemRequestStream {
    // Shared server state (channel plus shutdown flag), also held by the
    // control handles and responders created from this stream.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the stream has yielded `None`; polling afterwards panics.
    is_terminated: bool,
}
15772
15773impl std::marker::Unpin for SecureMemRequestStream {}
15774
impl futures::stream::FusedStream for SecureMemRequestStream {
    // True once `poll_next` has returned `None` (shutdown or peer closed).
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
15780
15781impl fidl::endpoints::RequestStream for SecureMemRequestStream {
15782 type Protocol = SecureMemMarker;
15783 type ControlHandle = SecureMemControlHandle;
15784
15785 fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
15786 Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
15787 }
15788
15789 fn control_handle(&self) -> Self::ControlHandle {
15790 SecureMemControlHandle { inner: self.inner.clone() }
15791 }
15792
15793 fn into_inner(
15794 self,
15795 ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
15796 {
15797 (self.inner, self.is_terminated)
15798 }
15799
15800 fn from_inner(
15801 inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
15802 is_terminated: bool,
15803 ) -> Self {
15804 Self { inner, is_terminated }
15805 }
15806}
15807
// Server-side dispatch: reads one message per poll, decodes its request
// payload by ordinal, and yields a typed `SecureMemRequest` with a responder
// capturing the transaction id.
impl futures::Stream for SecureMemRequestStream {
    type Item = Result<SecureMemRequest, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // Shutdown (requested via a control handle) ends the stream.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled SecureMemRequestStream after completion");
        }
        // Decode into thread-local scratch buffers to avoid per-message
        // allocation.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        // Peer closure is normal end-of-stream, not an error.
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))));
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                // Dispatch on the method ordinal.
                std::task::Poll::Ready(Some(match header.ordinal {
                    // GetPhysicalSecureHeaps
                    0x38716300592073e3 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::GetPhysicalSecureHeaps {
                            responder: SecureMemGetPhysicalSecureHeapsResponder {
                                // ManuallyDrop: the responder's drop behavior
                                // is managed explicitly by the responder type.
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // GetDynamicSecureHeaps
                    0x1190847f99952834 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::GetDynamicSecureHeaps {
                            responder: SecureMemGetDynamicSecureHeapsResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // GetPhysicalSecureHeapProperties
                    0xc6f06889009c7bc => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemGetPhysicalSecureHeapPropertiesRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemGetPhysicalSecureHeapPropertiesRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::GetPhysicalSecureHeapProperties {
                            payload: req,
                            responder: SecureMemGetPhysicalSecureHeapPropertiesResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // AddSecureHeapPhysicalRange
                    0x35f695b9b6c7217a => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemAddSecureHeapPhysicalRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemAddSecureHeapPhysicalRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::AddSecureHeapPhysicalRange {
                            payload: req,
                            responder: SecureMemAddSecureHeapPhysicalRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // DeleteSecureHeapPhysicalRange
                    0xeaa58c650264c9e => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemDeleteSecureHeapPhysicalRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemDeleteSecureHeapPhysicalRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::DeleteSecureHeapPhysicalRange {
                            payload: req,
                            responder: SecureMemDeleteSecureHeapPhysicalRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // ModifySecureHeapPhysicalRange
                    0x60b7448aa1187734 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemModifySecureHeapPhysicalRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemModifySecureHeapPhysicalRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::ModifySecureHeapPhysicalRange {
                            payload: req,
                            responder: SecureMemModifySecureHeapPhysicalRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // ZeroSubRange
                    0x5b25b7901a385ce5 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemZeroSubRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemZeroSubRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::ZeroSubRange {
                            payload: req,
                            responder: SecureMemZeroSubRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Unknown flexible one-way method: surface to the server.
                    _ if header.tx_id == 0
                        && header
                            .dynamic_flags()
                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        Ok(SecureMemRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: SecureMemControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::OneWay,
                        })
                    }
                    // Unknown flexible two-way method: reply with
                    // framework_err(UnknownMethod), then surface it.
                    _ if header
                        .dynamic_flags()
                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        this.inner.send_framework_err(
                            fidl::encoding::FrameworkErr::UnknownMethod,
                            header.tx_id,
                            header.ordinal,
                            header.dynamic_flags(),
                            (bytes, handles),
                        )?;
                        Ok(SecureMemRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: SecureMemControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::TwoWay,
                        })
                    }
                    // Unknown strict method: protocol error.
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
15991
15992/// SecureMem
15993///
15994/// The client is sysmem. The server is securemem driver.
15995///
15996/// TEE - Trusted Execution Environment.
15997///
15998/// REE - Rich Execution Environment.
15999///
16000/// Enables sysmem to call the securemem driver to get any secure heaps
16001/// configured via the TEE (or via the securemem driver), and set any physical
16002/// secure heaps configured via sysmem.
16003///
16004/// Presently, dynamically-allocated secure heaps are configured via sysmem, as
16005/// it starts quite early during boot and can successfully reserve contiguous
16006/// physical memory. Presently, fixed-location secure heaps are configured via
16007/// TEE, as the plumbing goes from the bootloader to the TEE. However, this
16008/// protocol intentionally doesn't care which heaps are dynamically-allocated
16009/// and which are fixed-location.
16010#[derive(Debug)]
16011pub enum SecureMemRequest {
16012 /// Gets the physical address and length of any secure heap whose physical
16013 /// range is configured via the TEE.
16014 ///
16015 /// Presently, these will be fixed physical addresses and lengths, with the
16016 /// location plumbed via the TEE.
16017 ///
16018 /// This is preferred over ['fuchsia.hardware.sysmem.Sysmem/RegisterHeap']
16019 /// when there isn't any special heap-specific per-VMO setup or teardown
16020 /// required.
16021 ///
16022 /// The physical range must be secured/protected by the TEE before the
16023 /// securemem driver responds to this request with success.
16024 ///
16025 /// Sysmem should only call this once. Returning zero heaps is not a
16026 /// failure.
16027 ///
16028 /// Errors:
16029 /// * PROTOCOL_DEVIATION - called more than once.
16030 /// * UNSPECIFIED - generic internal error (such as in communication
16031 /// with TEE which doesn't generate zx_status_t errors).
16032 /// * other errors are allowed; any other errors should be treated the same
16033 /// as UNSPECIFIED.
16034 GetPhysicalSecureHeaps { responder: SecureMemGetPhysicalSecureHeapsResponder },
16035 /// Gets information about any secure heaps whose physical pages are not
16036 /// configured by the TEE, but by sysmem.
16037 ///
16038 /// Sysmem should only call this once. Returning zero heaps is not a
16039 /// failure.
16040 ///
16041 /// Errors:
16042 /// * PROTOCOL_DEVIATION - called more than once.
16043 /// * UNSPECIFIED - generic internal error (such as in communication
16044 /// with TEE which doesn't generate zx_status_t errors).
16045 /// * other errors are allowed; any other errors should be treated the same
16046 /// as UNSPECIFIED.
16047 GetDynamicSecureHeaps { responder: SecureMemGetDynamicSecureHeapsResponder },
16048 /// This request from sysmem to the securemem driver gets the properties of
16049 /// a protected/secure heap.
16050 ///
16051 /// This only handles heaps with a single contiguous physical extent.
16052 ///
16053 /// The heap's entire physical range is indicated in case this request needs
16054 /// some physical space to auto-detect how many ranges are REE-usable. Any
16055 /// temporary HW protection ranges will be deleted before this request
16056 /// completes.
16057 ///
16058 /// Errors:
16059 /// * UNSPECIFIED - generic internal error (such as in communication
16060 /// with TEE which doesn't generate zx_status_t errors).
16061 /// * other errors are allowed; any other errors should be treated the same
16062 /// as UNSPECIFIED.
16063 GetPhysicalSecureHeapProperties {
16064 payload: SecureMemGetPhysicalSecureHeapPropertiesRequest,
16065 responder: SecureMemGetPhysicalSecureHeapPropertiesResponder,
16066 },
16067 /// This request from sysmem to the securemem driver conveys a physical
16068 /// range to add, for a heap whose physical range(s) are set up via
16069 /// sysmem.
16070 ///
16071 /// Only sysmem can call this because only sysmem is handed the client end
16072 /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
16073 /// securemem driver is the server end of this protocol.
16074 ///
16075 /// The securemem driver must configure all the covered offsets as protected
16076 /// before responding to this message with success.
16077 ///
16078 /// On failure, the securemem driver must ensure the protected range was not
16079 /// created.
16080 ///
16081 /// Sysmem must only call this up to once if dynamic_protection_ranges
16082 /// false.
16083 ///
16084 /// If dynamic_protection_ranges is true, sysmem can call this multiple
16085 /// times as long as the current number of ranges never exceeds
16086 /// max_protected_range_count.
16087 ///
16088 /// The caller must not attempt to add a range that matches an
16089 /// already-existing range. Added ranges can overlap each other as long as
16090 /// no two ranges match exactly.
16091 ///
16092 /// Errors:
16093 /// * PROTOCOL_DEVIATION - called more than once when
16094 /// !dynamic_protection_ranges. Adding a heap that would cause overall
16095 /// heap count to exceed max_protected_range_count. Unexpected heap, or
16096 /// range that doesn't conform to protected_range_granularity. See log.
16097 /// * UNSPECIFIED - generic internal error (such as in communication
16098 /// with TEE which doesn't generate zx_status_t errors).
16099 /// * other errors are possible, such as from communication failures or
16100 /// server propagation of failures.
16101 AddSecureHeapPhysicalRange {
16102 payload: SecureMemAddSecureHeapPhysicalRangeRequest,
16103 responder: SecureMemAddSecureHeapPhysicalRangeResponder,
16104 },
16105 /// This request from sysmem to the securemem driver conveys a physical
16106 /// range to delete, for a heap whose physical range(s) are set up via
16107 /// sysmem.
16108 ///
16109 /// Only sysmem can call this because only sysmem is handed the client end
16110 /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
16111 /// securemem driver is the server end of this protocol.
16112 ///
16113 /// The securemem driver must configure all the covered offsets as not
16114 /// protected before responding to this message with success.
16115 ///
16116 /// On failure, the securemem driver must ensure the protected range was not
16117 /// deleted.
16118 ///
16119 /// Sysmem must not call this if dynamic_protection_ranges false.
16120 ///
16121 /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
16122 /// on various ranges that exist at the time of the call.
16123 ///
16124 /// If any portion of the range being deleted is not also covered by another
16125 /// protected range, then any ongoing DMA to any part of the entire range
16126 /// may be interrupted / may fail, potentially in a way that's disruptive to
16127 /// the entire system (bus lockup or similar, depending on device details).
16128 /// Therefore, the caller must ensure that no ongoing DMA is occurring to
16129 /// any portion of the range being deleted, unless the caller has other
16130 /// active ranges covering every block of the range being deleted. Ongoing
16131 /// DMA to/from blocks outside the range being deleted is never impacted by
16132 /// the deletion.
16133 ///
16134 /// Errors:
16135 /// * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
16136 /// Unexpected heap, or range that doesn't conform to
16137 /// protected_range_granularity.
16138 /// * UNSPECIFIED - generic internal error (such as in communication
16139 /// with TEE which doesn't generate zx_status_t errors).
16140 /// * NOT_FOUND - the specified range is not found.
16141 /// * other errors are possible, such as from communication failures or
16142 /// server propagation of failures.
16143 DeleteSecureHeapPhysicalRange {
16144 payload: SecureMemDeleteSecureHeapPhysicalRangeRequest,
16145 responder: SecureMemDeleteSecureHeapPhysicalRangeResponder,
16146 },
16147 /// This request from sysmem to the securemem driver conveys a physical
16148 /// range to modify and its new base and length, for a heap whose physical
16149 /// range(s) are set up via sysmem.
16150 ///
16151 /// Only sysmem can call this because only sysmem is handed the client end
16152 /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
16153 /// securemem driver is the server end of this protocol.
16154 ///
16155 /// The securemem driver must configure the range to cover only the new
16156 /// offsets before responding to this message with success.
16157 ///
16158 /// On failure, the securemem driver must ensure the range was not changed.
16159 ///
16160 /// Sysmem must not call this if dynamic_protection_ranges false. Sysmem
16161 /// must not call this if !is_mod_protected_range_available.
16162 ///
16163 /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
16164 /// on various ranges that exist at the time of the call.
16165 ///
16166 /// The range must only be modified at one end or the other, but not both.
16167 /// If the range is getting shorter, and the un-covered blocks are not
16168 /// covered by other active ranges, any ongoing DMA to the entire range
    /// that's getting shorter may fail in a way that disrupts the entire system
16170 /// (bus lockup or similar), so the caller must ensure that no DMA is
16171 /// ongoing to any portion of a range that is getting shorter, unless the
16172 /// blocks being un-covered by the modification to this range are all
16173 /// covered by other active ranges, in which case no disruption to ongoing
16174 /// DMA will occur.
16175 ///
16176 /// If a range is modified to become <= zero length, the range is deleted.
16177 ///
16178 /// Errors:
16179 /// * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
16180 /// Unexpected heap, or old_range or new_range that doesn't conform to
16181 /// protected_range_granularity, or old_range and new_range differ in
16182 /// both begin and end (disallowed).
16183 /// * UNSPECIFIED - generic internal error (such as in communication
16184 /// with TEE which doesn't generate zx_status_t errors).
16185 /// * NOT_FOUND - the specified range is not found.
16186 /// * other errors are possible, such as from communication failures or
16187 /// server propagation of failures.
16188 ModifySecureHeapPhysicalRange {
16189 payload: SecureMemModifySecureHeapPhysicalRangeRequest,
16190 responder: SecureMemModifySecureHeapPhysicalRangeResponder,
16191 },
16192 /// Zero a sub-range of a currently-existing physical range added via
16193 /// AddSecureHeapPhysicalRange(). The sub-range must be fully covered by
16194 /// exactly one physical range, and must not overlap with any other
16195 /// physical range.
16196 ///
16197 /// is_covering_range_explicit - When true, the covering range must be one
16198 /// of the ranges explicitly created via AddSecureHeapPhysicalRange(),
16199 /// possibly modified since. When false, the covering range must not
16200 /// be one of the ranges explicitly created via
16201 /// AddSecureHeapPhysicalRange(), but the covering range must exist as
16202 /// a covering range not created via AddSecureHeapPhysicalRange(). The
16203 /// covering range is typically the entire physical range (or a range
16204 /// which covers even more) of a heap configured by the TEE and whose
16205 /// configuration is conveyed to sysmem via GetPhysicalSecureHeaps().
16206 ///
16207 /// Ongoing DMA is not disrupted by this request.
16208 ///
16209 /// Errors:
16210 /// * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
16211 /// Unexpected heap.
16212 /// * UNSPECIFIED - generic internal error (such as in communication
16213 /// with TEE which doesn't generate zx_status_t errors).
16214 /// * other errors are possible, such as from communication failures or
16215 /// server propagation of failures.
16216 ZeroSubRange {
16217 payload: SecureMemZeroSubRangeRequest,
16218 responder: SecureMemZeroSubRangeResponder,
16219 },
16220 /// An interaction was received which does not match any known method.
16221 #[non_exhaustive]
16222 _UnknownMethod {
16223 /// Ordinal of the method that was called.
16224 ordinal: u64,
16225 control_handle: SecureMemControlHandle,
16226 method_type: fidl::MethodType,
16227 },
16228}
16229
16230impl SecureMemRequest {
16231 #[allow(irrefutable_let_patterns)]
16232 pub fn into_get_physical_secure_heaps(
16233 self,
16234 ) -> Option<(SecureMemGetPhysicalSecureHeapsResponder)> {
16235 if let SecureMemRequest::GetPhysicalSecureHeaps { responder } = self {
16236 Some((responder))
16237 } else {
16238 None
16239 }
16240 }
16241
16242 #[allow(irrefutable_let_patterns)]
16243 pub fn into_get_dynamic_secure_heaps(
16244 self,
16245 ) -> Option<(SecureMemGetDynamicSecureHeapsResponder)> {
16246 if let SecureMemRequest::GetDynamicSecureHeaps { responder } = self {
16247 Some((responder))
16248 } else {
16249 None
16250 }
16251 }
16252
16253 #[allow(irrefutable_let_patterns)]
16254 pub fn into_get_physical_secure_heap_properties(
16255 self,
16256 ) -> Option<(
16257 SecureMemGetPhysicalSecureHeapPropertiesRequest,
16258 SecureMemGetPhysicalSecureHeapPropertiesResponder,
16259 )> {
16260 if let SecureMemRequest::GetPhysicalSecureHeapProperties { payload, responder } = self {
16261 Some((payload, responder))
16262 } else {
16263 None
16264 }
16265 }
16266
16267 #[allow(irrefutable_let_patterns)]
16268 pub fn into_add_secure_heap_physical_range(
16269 self,
16270 ) -> Option<(
16271 SecureMemAddSecureHeapPhysicalRangeRequest,
16272 SecureMemAddSecureHeapPhysicalRangeResponder,
16273 )> {
16274 if let SecureMemRequest::AddSecureHeapPhysicalRange { payload, responder } = self {
16275 Some((payload, responder))
16276 } else {
16277 None
16278 }
16279 }
16280
16281 #[allow(irrefutable_let_patterns)]
16282 pub fn into_delete_secure_heap_physical_range(
16283 self,
16284 ) -> Option<(
16285 SecureMemDeleteSecureHeapPhysicalRangeRequest,
16286 SecureMemDeleteSecureHeapPhysicalRangeResponder,
16287 )> {
16288 if let SecureMemRequest::DeleteSecureHeapPhysicalRange { payload, responder } = self {
16289 Some((payload, responder))
16290 } else {
16291 None
16292 }
16293 }
16294
16295 #[allow(irrefutable_let_patterns)]
16296 pub fn into_modify_secure_heap_physical_range(
16297 self,
16298 ) -> Option<(
16299 SecureMemModifySecureHeapPhysicalRangeRequest,
16300 SecureMemModifySecureHeapPhysicalRangeResponder,
16301 )> {
16302 if let SecureMemRequest::ModifySecureHeapPhysicalRange { payload, responder } = self {
16303 Some((payload, responder))
16304 } else {
16305 None
16306 }
16307 }
16308
16309 #[allow(irrefutable_let_patterns)]
16310 pub fn into_zero_sub_range(
16311 self,
16312 ) -> Option<(SecureMemZeroSubRangeRequest, SecureMemZeroSubRangeResponder)> {
16313 if let SecureMemRequest::ZeroSubRange { payload, responder } = self {
16314 Some((payload, responder))
16315 } else {
16316 None
16317 }
16318 }
16319
16320 /// Name of the method defined in FIDL
16321 pub fn method_name(&self) -> &'static str {
16322 match *self {
16323 SecureMemRequest::GetPhysicalSecureHeaps { .. } => "get_physical_secure_heaps",
16324 SecureMemRequest::GetDynamicSecureHeaps { .. } => "get_dynamic_secure_heaps",
16325 SecureMemRequest::GetPhysicalSecureHeapProperties { .. } => {
16326 "get_physical_secure_heap_properties"
16327 }
16328 SecureMemRequest::AddSecureHeapPhysicalRange { .. } => "add_secure_heap_physical_range",
16329 SecureMemRequest::DeleteSecureHeapPhysicalRange { .. } => {
16330 "delete_secure_heap_physical_range"
16331 }
16332 SecureMemRequest::ModifySecureHeapPhysicalRange { .. } => {
16333 "modify_secure_heap_physical_range"
16334 }
16335 SecureMemRequest::ZeroSubRange { .. } => "zero_sub_range",
16336 SecureMemRequest::_UnknownMethod { method_type: fidl::MethodType::OneWay, .. } => {
16337 "unknown one-way method"
16338 }
16339 SecureMemRequest::_UnknownMethod { method_type: fidl::MethodType::TwoWay, .. } => {
16340 "unknown two-way method"
16341 }
16342 }
16343 }
16344}
16345
/// Server-side control handle for the `SecureMem` protocol: a clonable
/// reference to the serving channel state, used to shut the connection down
/// or signal the peer endpoint.
#[derive(Debug, Clone)]
pub struct SecureMemControlHandle {
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}

impl fidl::endpoints::ControlHandle for SecureMemControlHandle {
    // All methods below delegate to the shared `ServeInner` state.
    fn shutdown(&self) {
        self.inner.shutdown()
    }

    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Signaling the peer endpoint is only compiled for Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}

// Empty inherent impl; presumably `SecureMem` declares no events, so fidlgen
// generated no event-sender methods here — confirm against the FIDL source.
impl SecureMemControlHandle {}
16379
/// Responder used to reply to a `SecureMem.GetPhysicalSecureHeaps` request.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemGetPhysicalSecureHeapsResponder {
    // ManuallyDrop lets `drop_without_shutdown` release the handle without
    // running this type's Drop impl (which would shut down the channel).
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}

/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemGetPhysicalSecureHeapsResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemGetPhysicalSecureHeapsResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemGetPhysicalSecureHeapsResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    // Encodes and writes the reply. 0x38716300592073e3 is the fidlgen-assigned
    // ordinal for `GetPhysicalSecureHeaps`; FLEXIBLE is the interaction's
    // dynamic-flags value.
    fn send_raw(
        &self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            SecureMemGetPhysicalSecureHeapsResponse,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0x38716300592073e3,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16454
/// Responder used to reply to a `SecureMem.GetDynamicSecureHeaps` request.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemGetDynamicSecureHeapsResponder {
    // ManuallyDrop lets `drop_without_shutdown` release the handle without
    // running this type's Drop impl (which would shut down the channel).
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}

/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemGetDynamicSecureHeapsResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemGetDynamicSecureHeapsResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemGetDynamicSecureHeapsResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut result: Result<&SecureMemGetDynamicSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut result: Result<&SecureMemGetDynamicSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    // Encodes and writes the reply. 0x1190847f99952834 is the fidlgen-assigned
    // ordinal for `GetDynamicSecureHeaps`; FLEXIBLE is the interaction's
    // dynamic-flags value.
    fn send_raw(
        &self,
        mut result: Result<&SecureMemGetDynamicSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            SecureMemGetDynamicSecureHeapsResponse,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0x1190847f99952834,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16529
/// Responder used to reply to a `SecureMem.GetPhysicalSecureHeapProperties`
/// request.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemGetPhysicalSecureHeapPropertiesResponder {
    // ManuallyDrop lets `drop_without_shutdown` release the handle without
    // running this type's Drop impl (which would shut down the channel).
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}

/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemGetPhysicalSecureHeapPropertiesResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemGetPhysicalSecureHeapPropertiesResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemGetPhysicalSecureHeapPropertiesResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    // Encodes and writes the reply. 0xc6f06889009c7bc is the fidlgen-assigned
    // ordinal for `GetPhysicalSecureHeapProperties`; FLEXIBLE is the
    // interaction's dynamic-flags value.
    fn send_raw(
        &self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            SecureMemGetPhysicalSecureHeapPropertiesResponse,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0xc6f06889009c7bc,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16604
/// Responder used to reply to a `SecureMem.AddSecureHeapPhysicalRange`
/// request (empty-struct success payload).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemAddSecureHeapPhysicalRangeResponder {
    // ManuallyDrop lets `drop_without_shutdown` release the handle without
    // running this type's Drop impl (which would shut down the channel).
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}

/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemAddSecureHeapPhysicalRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemAddSecureHeapPhysicalRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemAddSecureHeapPhysicalRangeResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    // Encodes and writes the reply. 0x35f695b9b6c7217a is the fidlgen-assigned
    // ordinal for `AddSecureHeapPhysicalRange`; FLEXIBLE is the interaction's
    // dynamic-flags value.
    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            fidl::encoding::EmptyStruct,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0x35f695b9b6c7217a,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16670
/// Responder used to reply to a `SecureMem.DeleteSecureHeapPhysicalRange`
/// request (empty-struct success payload).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemDeleteSecureHeapPhysicalRangeResponder {
    // ManuallyDrop lets `drop_without_shutdown` release the handle without
    // running this type's Drop impl (which would shut down the channel).
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}

/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemDeleteSecureHeapPhysicalRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemDeleteSecureHeapPhysicalRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemDeleteSecureHeapPhysicalRangeResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    // Encodes and writes the reply. 0xeaa58c650264c9e is the fidlgen-assigned
    // ordinal for `DeleteSecureHeapPhysicalRange`; FLEXIBLE is the
    // interaction's dynamic-flags value.
    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            fidl::encoding::EmptyStruct,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0xeaa58c650264c9e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16736
/// Responder used to reply to a `SecureMem.ModifySecureHeapPhysicalRange`
/// request (empty-struct success payload).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemModifySecureHeapPhysicalRangeResponder {
    // ManuallyDrop lets `drop_without_shutdown` release the handle without
    // running this type's Drop impl (which would shut down the channel).
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}

/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemModifySecureHeapPhysicalRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemModifySecureHeapPhysicalRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemModifySecureHeapPhysicalRangeResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    // Encodes and writes the reply. 0x60b7448aa1187734 is the fidlgen-assigned
    // ordinal for `ModifySecureHeapPhysicalRange`; FLEXIBLE is the
    // interaction's dynamic-flags value.
    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            fidl::encoding::EmptyStruct,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0x60b7448aa1187734,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16802
/// Responder used to reply to a `SecureMem.ZeroSubRange` request
/// (empty-struct success payload).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemZeroSubRangeResponder {
    // ManuallyDrop lets `drop_without_shutdown` release the handle without
    // running this type's Drop impl (which would shut down the channel).
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}

/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemZeroSubRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemZeroSubRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemZeroSubRangeResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    // Encodes and writes the reply. 0x5b25b7901a385ce5 is the fidlgen-assigned
    // ordinal for `ZeroSubRange`; FLEXIBLE is the interaction's dynamic-flags
    // value.
    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            fidl::encoding::EmptyStruct,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0x5b25b7901a385ce5,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16868
16869mod internal {
16870 use super::*;
16871
16872 impl AllocatorAllocateNonSharedCollectionRequest {
16873 #[inline(always)]
16874 fn max_ordinal_present(&self) -> u64 {
16875 if let Some(_) = self.collection_request {
16876 return 1;
16877 }
16878 0
16879 }
16880 }
16881
    impl fidl::encoding::ResourceTypeMarker for AllocatorAllocateNonSharedCollectionRequest {
        // Resource types are borrowed mutably during encoding so contained
        // handles can be moved out of the value (see `take_or_borrow` uses
        // in the encode impl).
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }
16890
    unsafe impl fidl::encoding::TypeMarker for AllocatorAllocateNonSharedCollectionRequest {
        type Owned = Self;

        // Tables are 8-byte aligned on the wire.
        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        // A table's inline form is a 16-byte vector header: ordinal count at
        // +0 and the ALLOC_PRESENT marker at +8 (written by the encode impl).
        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
16904
    unsafe impl
        fidl::encoding::Encode<
            AllocatorAllocateNonSharedCollectionRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut AllocatorAllocateNonSharedCollectionRequest
    {
        // Encodes the table as a vector of envelopes: a 16-byte header
        // (max present ordinal + presence marker), then one 8-byte envelope
        // per ordinal up to the highest present field.
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<AllocatorAllocateNonSharedCollectionRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            // Skip fields above the highest present ordinal (only ordinal 1 exists).
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>, fidl::encoding::DefaultFuchsiaResourceDialect>(
            self.collection_request.as_mut().map(<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>> as fidl::encoding::ResourceTypeMarker>::take_or_borrow),
            encoder, offset + cur_offset, depth
        )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
16960
// Decodes `AllocatorAllocateNonSharedCollectionRequest` (a FIDL table) from the
// wire format: a 16-byte vector header (max ordinal + presence marker) followed
// by one 8-byte envelope per ordinal. Member 1 is the `collection_request`
// server endpoint; envelopes for ordinals this binding does not know are
// skipped via `decode_unknown_envelope`.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorAllocateNonSharedCollectionRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // A table is encoded as a non-nullable vector of envelopes.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Payloads of 4 bytes or fewer must be stored inline in the envelope.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.collection_request.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // Check that the envelope's declared byte and handle counts match
            // what decoding actually consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
17066
17067 impl AllocatorAllocateSharedCollectionRequest {
17068 #[inline(always)]
17069 fn max_ordinal_present(&self) -> u64 {
17070 if let Some(_) = self.token_request {
17071 return 1;
17072 }
17073 0
17074 }
17075 }
17076
17077 impl fidl::encoding::ResourceTypeMarker for AllocatorAllocateSharedCollectionRequest {
17078 type Borrowed<'a> = &'a mut Self;
17079 fn take_or_borrow<'a>(
17080 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
17081 ) -> Self::Borrowed<'a> {
17082 value
17083 }
17084 }
17085
// Wire-format metadata for the table: 8-byte alignment and a 16-byte inline
// part (the vector header that `encode` writes: an 8-byte max ordinal followed
// by an 8-byte presence marker). These constants are part of the unsafe
// `TypeMarker` contract and must match the encoder/decoder exactly.
unsafe impl fidl::encoding::TypeMarker for AllocatorAllocateSharedCollectionRequest {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
17099
// Encodes `AllocatorAllocateSharedCollectionRequest` as a FIDL table: a vector
// header (max present ordinal + presence marker) followed by one 8-byte
// envelope per ordinal. Encoding takes `&mut self` so the `token_request`
// endpoint can be moved out via `take_or_borrow`.
unsafe impl
    fidl::encoding::Encode<
        AllocatorAllocateSharedCollectionRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut AllocatorAllocateSharedCollectionRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<AllocatorAllocateSharedCollectionRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.token_request.as_mut().map(
                <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
17164
// Decodes `AllocatorAllocateSharedCollectionRequest` (a FIDL table): reads the
// vector header, then one 8-byte envelope per ordinal. Member 1 is the
// `token_request` server endpoint; unknown ordinals are skipped.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorAllocateSharedCollectionRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // A table is encoded as a non-nullable vector of envelopes.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Payloads of 4 bytes or fewer must be stored inline in the envelope.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.token_request.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // Check that the envelope's declared byte and handle counts match
            // what decoding actually consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
17272
17273 impl AllocatorBindSharedCollectionRequest {
17274 #[inline(always)]
17275 fn max_ordinal_present(&self) -> u64 {
17276 if let Some(_) = self.buffer_collection_request {
17277 return 2;
17278 }
17279 if let Some(_) = self.token {
17280 return 1;
17281 }
17282 0
17283 }
17284 }
17285
17286 impl fidl::encoding::ResourceTypeMarker for AllocatorBindSharedCollectionRequest {
17287 type Borrowed<'a> = &'a mut Self;
17288 fn take_or_borrow<'a>(
17289 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
17290 ) -> Self::Borrowed<'a> {
17291 value
17292 }
17293 }
17294
// Wire-format metadata for the table: 8-byte alignment and a 16-byte inline
// part (the vector header that `encode` writes: an 8-byte max ordinal followed
// by an 8-byte presence marker). These constants are part of the unsafe
// `TypeMarker` contract and must match the encoder/decoder exactly.
unsafe impl fidl::encoding::TypeMarker for AllocatorBindSharedCollectionRequest {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
17308
// Encodes `AllocatorBindSharedCollectionRequest` as a FIDL table with two
// members: ordinal 1 is the `token` client endpoint, ordinal 2 is the
// `buffer_collection_request` server endpoint. Encoding takes `&mut self` so
// both endpoints can be moved out via `take_or_borrow`. Each `if N >
// max_ordinal` check stops early once all present members are written.
unsafe impl
    fidl::encoding::Encode<
        AllocatorBindSharedCollectionRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut AllocatorBindSharedCollectionRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<AllocatorBindSharedCollectionRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.token.as_mut().map(
                <fidl::encoding::Endpoint<
                    fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 2 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>, fidl::encoding::DefaultFuchsiaResourceDialect>(
            self.buffer_collection_request.as_mut().map(<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>> as fidl::encoding::ResourceTypeMarker>::take_or_borrow),
            encoder, offset + cur_offset, depth
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
17394
// Decodes `AllocatorBindSharedCollectionRequest` (a FIDL table): reads the
// vector header, then one 8-byte envelope per ordinal. Ordinal 1 is the
// `token` client endpoint, ordinal 2 is the `buffer_collection_request`
// server endpoint; unknown ordinals are skipped.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorBindSharedCollectionRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // A table is encoded as a non-nullable vector of envelopes.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: `token`.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Payloads of 4 bytes or fewer must be stored inline in the envelope.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.token.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<
                    fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // Check that the envelope's declared byte and handle counts match
            // what decoding actually consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 2 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 2: `buffer_collection_request`.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.buffer_collection_request.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
17562
17563 impl AllocatorGetVmoInfoRequest {
17564 #[inline(always)]
17565 fn max_ordinal_present(&self) -> u64 {
17566 if let Some(_) = self.vmo {
17567 return 1;
17568 }
17569 0
17570 }
17571 }
17572
17573 impl fidl::encoding::ResourceTypeMarker for AllocatorGetVmoInfoRequest {
17574 type Borrowed<'a> = &'a mut Self;
17575 fn take_or_borrow<'a>(
17576 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
17577 ) -> Self::Borrowed<'a> {
17578 value
17579 }
17580 }
17581
// Wire-format metadata for the table: 8-byte alignment and a 16-byte inline
// part (the vector header that `encode` writes: an 8-byte max ordinal followed
// by an 8-byte presence marker). These constants are part of the unsafe
// `TypeMarker` contract and must match the encoder/decoder exactly.
unsafe impl fidl::encoding::TypeMarker for AllocatorGetVmoInfoRequest {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
17595
// Encodes `AllocatorGetVmoInfoRequest` as a FIDL table whose single member
// (ordinal 1) is the `vmo` handle. The HandleType rights mask 2147483648 is
// `ZX_RIGHT_SAME_RIGHTS` (bit 31); encoding takes `&mut self` so the handle
// can be moved out via `take_or_borrow`.
unsafe impl
    fidl::encoding::Encode<
        AllocatorGetVmoInfoRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut AllocatorGetVmoInfoRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<AllocatorGetVmoInfoRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::Vmo,
                { fidl::ObjectType::VMO.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.vmo.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::Vmo,
                    { fidl::ObjectType::VMO.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
17666
// Decodes `AllocatorGetVmoInfoRequest` (a FIDL table): reads the vector
// header, then one 8-byte envelope per ordinal. Member 1 is the `vmo` handle
// (rights mask 2147483648 = ZX_RIGHT_SAME_RIGHTS); unknown ordinals are
// skipped.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorGetVmoInfoRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // A table is encoded as a non-nullable vector of envelopes.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::HandleType<
                fidl::Vmo,
                { fidl::ObjectType::VMO.into_raw() },
                2147483648,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Payloads of 4 bytes or fewer must be stored inline in the envelope.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.vmo.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            // Check that the envelope's declared byte and handle counts match
            // what decoding actually consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
17761
17762 impl AllocatorGetVmoInfoResponse {
17763 #[inline(always)]
17764 fn max_ordinal_present(&self) -> u64 {
17765 if let Some(_) = self.close_weak_asap {
17766 return 3;
17767 }
17768 if let Some(_) = self.buffer_index {
17769 return 2;
17770 }
17771 if let Some(_) = self.buffer_collection_id {
17772 return 1;
17773 }
17774 0
17775 }
17776 }
17777
17778 impl fidl::encoding::ResourceTypeMarker for AllocatorGetVmoInfoResponse {
17779 type Borrowed<'a> = &'a mut Self;
17780 fn take_or_borrow<'a>(
17781 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
17782 ) -> Self::Borrowed<'a> {
17783 value
17784 }
17785 }
17786
// Wire-format metadata for the table: 8-byte alignment and a 16-byte inline
// part (the vector header that `encode` writes: an 8-byte max ordinal followed
// by an 8-byte presence marker). These constants are part of the unsafe
// `TypeMarker` contract and must match the encoder/decoder exactly.
unsafe impl fidl::encoding::TypeMarker for AllocatorGetVmoInfoResponse {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
17800
// Encodes `AllocatorGetVmoInfoResponse` as a FIDL table with three members:
// ordinal 1 `buffer_collection_id` (u64, borrowed), ordinal 2 `buffer_index`
// (u64, borrowed), and ordinal 3 `close_weak_asap` (eventpair handle, moved
// out via `take_or_borrow`; rights mask 2147483648 = ZX_RIGHT_SAME_RIGHTS).
// Each `if N > max_ordinal` check stops early once all present members are
// written.
unsafe impl
    fidl::encoding::Encode<
        AllocatorGetVmoInfoResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut AllocatorGetVmoInfoResponse
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<AllocatorGetVmoInfoResponse>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            u64,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.buffer_collection_id
                .as_ref()
                .map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 2 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            u64,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.buffer_index.as_ref().map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 3 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (3 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::EventPair,
                { fidl::ObjectType::EVENTPAIR.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.close_weak_asap.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
17925
// Manual FIDL table decoder for AllocatorGetVmoInfoResponse.
//
// Wire layout: a table is a vector header (count = highest present ordinal,
// plus a presence marker) followed by one 8-byte envelope per ordinal.
// Members decoded here, in ordinal order:
//   ordinal 1: buffer_collection_id (u64)
//   ordinal 2: buffer_index (u64)
//   ordinal 3: close_weak_asap (eventpair handle)
// Envelopes for unknown ordinals (gaps and trailing) are consumed via
// decode_unknown_envelope so newer senders remain compatible.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorGetVmoInfoResponse
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // A table's inline part is a vector header; a null vector is not a
        // valid table.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;

        // -- ordinal 1: buffer_collection_id --
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            // Values of at most 4 bytes are stored inline in the envelope
            // itself; the header's inline bit must agree with the member type.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.buffer_collection_id.get_or_insert_with(|| {
                fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // The envelope header declares how many bytes/handles it covers;
            // verify decoding consumed exactly that many of each.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // -- ordinal 2: buffer_index --
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 2 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.buffer_index.get_or_insert_with(|| {
                fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // -- ordinal 3: close_weak_asap --
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 3 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            // 2147483648 is 0x8000_0000 — presumably ZX_RIGHT_SAME_RIGHTS;
            // confirm against the zx rights constants.
            let member_inline_size = <fidl::encoding::HandleType<
                fidl::EventPair,
                { fidl::ObjectType::EVENTPAIR.into_raw() },
                2147483648,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.close_weak_asap.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
18124
18125 impl BufferCollectionAttachLifetimeTrackingRequest {
18126 #[inline(always)]
18127 fn max_ordinal_present(&self) -> u64 {
18128 if let Some(_) = self.buffers_remaining {
18129 return 2;
18130 }
18131 if let Some(_) = self.server_end {
18132 return 1;
18133 }
18134 0
18135 }
18136 }
18137
18138 impl fidl::encoding::ResourceTypeMarker for BufferCollectionAttachLifetimeTrackingRequest {
18139 type Borrowed<'a> = &'a mut Self;
18140 fn take_or_borrow<'a>(
18141 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
18142 ) -> Self::Borrowed<'a> {
18143 value
18144 }
18145 }
18146
// Wire-format metadata for this table type. These constants are part of the
// FIDL ABI and must not be changed by hand.
unsafe impl fidl::encoding::TypeMarker for BufferCollectionAttachLifetimeTrackingRequest {
    type Owned = Self;

    // Tables are 8-byte aligned on the wire.
    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    // The inline part of a table is its 16-byte vector header (8-byte count
    // of the highest present ordinal + 8-byte presence marker).
    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
18160
// Manual FIDL table encoder for BufferCollectionAttachLifetimeTrackingRequest.
//
// Writes the 16-byte vector header inline, then one 8-byte envelope per
// ordinal out-of-line, stopping after the highest present ordinal:
//   ordinal 1: server_end (eventpair handle, moved out via take_or_borrow)
//   ordinal 2: buffers_remaining (u32, borrowed)
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionAttachLifetimeTrackingRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionAttachLifetimeTrackingRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<BufferCollectionAttachLifetimeTrackingRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        // Members past max_ordinal are absent by construction; stop early.
        if 1 > max_ordinal {
            return Ok(());
        }

        // -- ordinal 1: server_end --
        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // 2147483648 is 0x8000_0000 — presumably ZX_RIGHT_SAME_RIGHTS;
        // confirm against the zx rights constants.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::EventPair,
                { fidl::ObjectType::EVENTPAIR.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.server_end.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 2 > max_ordinal {
            return Ok(());
        }

        // -- ordinal 2: buffers_remaining --
        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            u32,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.buffers_remaining
                .as_ref()
                .map(<u32 as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
18259
// Manual FIDL table decoder for BufferCollectionAttachLifetimeTrackingRequest.
//
// Members decoded in ordinal order:
//   ordinal 1: server_end (eventpair handle)
//   ordinal 2: buffers_remaining (u32)
// Unknown ordinals (gaps and trailing) are skipped via
// decode_unknown_envelope for forward compatibility.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionAttachLifetimeTrackingRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // A table's inline part is a vector header; null is not a valid table.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;

        // -- ordinal 1: server_end --
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            // 2147483648 is 0x8000_0000 — presumably ZX_RIGHT_SAME_RIGHTS;
            // confirm against the zx rights constants.
            let member_inline_size = <fidl::encoding::HandleType<
                fidl::EventPair,
                { fidl::ObjectType::EVENTPAIR.into_raw() },
                2147483648,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Values of at most 4 bytes are stored inline in the envelope
            // itself; the header's inline bit must agree with the member type.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.server_end.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            // Verify decoding consumed exactly the bytes/handles the envelope
            // header declared.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // -- ordinal 2: buffers_remaining --
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 2 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <u32 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.buffers_remaining.get_or_insert_with(|| {
                fidl::new_empty!(u32, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                u32,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
18406
18407 impl BufferCollectionAttachTokenRequest {
18408 #[inline(always)]
18409 fn max_ordinal_present(&self) -> u64 {
18410 if let Some(_) = self.token_request {
18411 return 2;
18412 }
18413 if let Some(_) = self.rights_attenuation_mask {
18414 return 1;
18415 }
18416 0
18417 }
18418 }
18419
18420 impl fidl::encoding::ResourceTypeMarker for BufferCollectionAttachTokenRequest {
18421 type Borrowed<'a> = &'a mut Self;
18422 fn take_or_borrow<'a>(
18423 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
18424 ) -> Self::Borrowed<'a> {
18425 value
18426 }
18427 }
18428
// Wire-format metadata for this table type. These constants are part of the
// FIDL ABI and must not be changed by hand.
unsafe impl fidl::encoding::TypeMarker for BufferCollectionAttachTokenRequest {
    type Owned = Self;

    // Tables are 8-byte aligned on the wire.
    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    // The inline part of a table is its 16-byte vector header (8-byte count
    // of the highest present ordinal + 8-byte presence marker).
    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
18442
// Manual FIDL table encoder for BufferCollectionAttachTokenRequest.
//
// Writes the 16-byte vector header inline, then one 8-byte envelope per
// ordinal out-of-line, stopping after the highest present ordinal:
//   ordinal 1: rights_attenuation_mask (fidl::Rights, borrowed)
//   ordinal 2: token_request (server-end channel, moved out via take_or_borrow)
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionAttachTokenRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionAttachTokenRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<BufferCollectionAttachTokenRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        // Members past max_ordinal are absent by construction; stop early.
        if 1 > max_ordinal {
            return Ok(());
        }

        // -- ordinal 1: rights_attenuation_mask --
        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::Rights,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.rights_attenuation_mask
                .as_ref()
                .map(<fidl::Rights as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 2 > max_ordinal {
            return Ok(());
        }

        // -- ordinal 2: token_request --
        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.token_request.as_mut().map(
                <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
18535
// Manual FIDL table decoder for BufferCollectionAttachTokenRequest.
//
// Members decoded in ordinal order:
//   ordinal 1: rights_attenuation_mask (fidl::Rights)
//   ordinal 2: token_request (server-end channel)
// Unknown ordinals (gaps and trailing) are skipped via
// decode_unknown_envelope for forward compatibility.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionAttachTokenRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // A table's inline part is a vector header; null is not a valid table.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;

        // -- ordinal 1: rights_attenuation_mask --
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <fidl::Rights as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            // Values of at most 4 bytes are stored inline in the envelope
            // itself; the header's inline bit must agree with the member type.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.rights_attenuation_mask.get_or_insert_with(|| {
                fidl::new_empty!(fidl::Rights, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                fidl::Rights,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // Verify decoding consumed exactly the bytes/handles the envelope
            // header declared.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // -- ordinal 2: token_request --
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 2 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.token_request.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
18695
18696 impl BufferCollectionInfo {
18697 #[inline(always)]
18698 fn max_ordinal_present(&self) -> u64 {
18699 if let Some(_) = self.buffer_collection_id {
18700 return 3;
18701 }
18702 if let Some(_) = self.buffers {
18703 return 2;
18704 }
18705 if let Some(_) = self.settings {
18706 return 1;
18707 }
18708 0
18709 }
18710 }
18711
18712 impl fidl::encoding::ResourceTypeMarker for BufferCollectionInfo {
18713 type Borrowed<'a> = &'a mut Self;
18714 fn take_or_borrow<'a>(
18715 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
18716 ) -> Self::Borrowed<'a> {
18717 value
18718 }
18719 }
18720
// Wire-format metadata for this table type. These constants are part of the
// FIDL ABI and must not be changed by hand.
unsafe impl fidl::encoding::TypeMarker for BufferCollectionInfo {
    type Owned = Self;

    // Tables are 8-byte aligned on the wire.
    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    // The inline part of a table is its 16-byte vector header (8-byte count
    // of the highest present ordinal + 8-byte presence marker).
    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
18734
// Manual FIDL table encoder for BufferCollectionInfo.
//
// Writes the 16-byte vector header inline, then one 8-byte envelope per
// ordinal out-of-line, stopping after the highest present ordinal:
//   ordinal 1: settings (SingleBufferSettings, borrowed — value type)
//   ordinal 2: buffers (vector of up to 128 VmoBuffer, moved out via
//              take_or_borrow — resource type carrying VMO handles)
//   ordinal 3: buffer_collection_id (u64, borrowed)
unsafe impl
    fidl::encoding::Encode<BufferCollectionInfo, fidl::encoding::DefaultFuchsiaResourceDialect>
    for &mut BufferCollectionInfo
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<BufferCollectionInfo>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        // Members past max_ordinal are absent by construction; stop early.
        if 1 > max_ordinal {
            return Ok(());
        }

        // -- ordinal 1: settings --
        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            SingleBufferSettings,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.settings
                .as_ref()
                .map(<SingleBufferSettings as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 2 > max_ordinal {
            return Ok(());
        }

        // -- ordinal 2: buffers --
        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<fidl::encoding::Vector<VmoBuffer, 128>, fidl::encoding::DefaultFuchsiaResourceDialect>(
            self.buffers.as_mut().map(<fidl::encoding::Vector<VmoBuffer, 128> as fidl::encoding::ResourceTypeMarker>::take_or_borrow),
            encoder, offset + cur_offset, depth
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 3 > max_ordinal {
            return Ok(());
        }

        // -- ordinal 3: buffer_collection_id --
        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (3 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            u64,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.buffer_collection_id
                .as_ref()
                .map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
18844
// Decodes a `BufferCollectionInfo` FIDL table from the wire. A table is a
// vector header (count + presence marker) followed by one 8-byte envelope per
// ordinal; set members live out-of-line (or inline in the envelope if <= 4
// bytes). Unknown/reserved ordinals are skipped via decode_unknown_envelope.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionInfo
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    // Safety contract is inherited from the `Decode` trait: `offset` must be
    // in-bounds and aligned for this type within the decoder's buffer.
    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // `len` is the number of envelopes, i.e. the highest ordinal present.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        // Ordinal 1: `settings` (SingleBufferSettings).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <SingleBufferSettings as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context,
                );
            // The envelope's inline bit must agree with the member's size.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.settings.get_or_insert_with(|| {
                fidl::new_empty!(
                    SingleBufferSettings,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                SingleBufferSettings,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // Verify the envelope's declared byte/handle counts against what
            // was actually consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        // Ordinal 2: `buffers` (vector<VmoBuffer>:128).
        next_offset += envelope_size;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 2 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Vector<VmoBuffer, 128> as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.buffers.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::Vector<VmoBuffer, 128>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::Vector<VmoBuffer, 128>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        // Ordinal 3: `buffer_collection_id` (u64).
        next_offset += envelope_size;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 3 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.buffer_collection_id.get_or_insert_with(|| {
                fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
19042
19043 impl BufferCollectionSetConstraintsRequest {
19044 #[inline(always)]
19045 fn max_ordinal_present(&self) -> u64 {
19046 if let Some(_) = self.constraints {
19047 return 1;
19048 }
19049 0
19050 }
19051 }
19052
// Resource-type marker: the value is borrowed as `&mut Self` so encoding can
// move handles out of handle-carrying fields, if any, rather than duplicating.
impl fidl::encoding::ResourceTypeMarker for BufferCollectionSetConstraintsRequest {
    type Borrowed<'a> = &'a mut Self;
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        value
    }
}
19061
// Wire-format layout metadata: a FIDL table occupies 16 inline bytes
// (its vector header) with 8-byte alignment.
unsafe impl fidl::encoding::TypeMarker for BufferCollectionSetConstraintsRequest {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
19075
// Encodes a `BufferCollectionSetConstraintsRequest` table: writes the vector
// header (max present ordinal + presence marker), then one envelope per
// ordinal up to the maximum that is set.
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionSetConstraintsRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionSetConstraintsRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<BufferCollectionSetConstraintsRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        // Ordinal 1: `constraints` (BufferCollectionConstraints).
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            BufferCollectionConstraints,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.constraints
                .as_ref()
                .map(<BufferCollectionConstraints as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
19138
// Decodes a `BufferCollectionSetConstraintsRequest` table. Single known
// member: `constraints` at ordinal 1; later unknown ordinals are skipped.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionSetConstraintsRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    // Safety contract is inherited from the `Decode` trait: `offset` must be
    // in-bounds and aligned for this type within the decoder's buffer.
    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        // Ordinal 1: `constraints` (BufferCollectionConstraints).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <BufferCollectionConstraints as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context,
                );
            // The envelope's inline bit must agree with the member's size.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.constraints.get_or_insert_with(|| {
                fidl::new_empty!(
                    BufferCollectionConstraints,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                BufferCollectionConstraints,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // Verify the envelope's declared byte/handle counts against what
            // was actually consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
19241
19242 impl BufferCollectionTokenCreateBufferCollectionTokenGroupRequest {
19243 #[inline(always)]
19244 fn max_ordinal_present(&self) -> u64 {
19245 if let Some(_) = self.group_request {
19246 return 1;
19247 }
19248 0
19249 }
19250 }
19251
// Resource-type marker: the value is borrowed as `&mut Self` so encoding can
// move the `group_request` server-end handle out of the table.
impl fidl::encoding::ResourceTypeMarker
    for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
{
    type Borrowed<'a> = &'a mut Self;
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        value
    }
}
19262
// Wire-format layout metadata: a FIDL table occupies 16 inline bytes
// (its vector header) with 8-byte alignment.
unsafe impl fidl::encoding::TypeMarker
    for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
{
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
19278
// Encodes a `BufferCollectionTokenCreateBufferCollectionTokenGroupRequest`
// table. The single member (`group_request`, ordinal 1) is a server endpoint,
// so its handle is taken out of the value via `take_or_borrow`.
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder
            .debug_check_bounds::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(
                offset,
            );
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        // Ordinal 1: `group_request` (server end).
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.group_request.as_mut().map(
                <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
19348
// Decodes a `BufferCollectionTokenCreateBufferCollectionTokenGroupRequest`
// table. Single known member: `group_request` (server end) at ordinal 1;
// later unknown ordinals are skipped.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    // Safety contract is inherited from the `Decode` trait: `offset` must be
    // in-bounds and aligned for this type within the decoder's buffer.
    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        // Ordinal 1: `group_request` (server end).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // The envelope's inline bit must agree with the member's size.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.group_request.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // Verify the envelope's declared byte/handle counts against what
            // was actually consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
19456
19457 impl BufferCollectionTokenDuplicateRequest {
19458 #[inline(always)]
19459 fn max_ordinal_present(&self) -> u64 {
19460 if let Some(_) = self.token_request {
19461 return 2;
19462 }
19463 if let Some(_) = self.rights_attenuation_mask {
19464 return 1;
19465 }
19466 0
19467 }
19468 }
19469
// Resource-type marker: the value is borrowed as `&mut Self` so encoding can
// move the `token_request` server-end handle out of the table.
impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenDuplicateRequest {
    type Borrowed<'a> = &'a mut Self;
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        value
    }
}
19478
// Wire-format layout metadata: a FIDL table occupies 16 inline bytes
// (its vector header) with 8-byte alignment.
unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenDuplicateRequest {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
19492
// Encodes a `BufferCollectionTokenDuplicateRequest` table. Members in ordinal
// order: 1 = `rights_attenuation_mask` (value type, borrowed), 2 =
// `token_request` (server end; handle taken out via `take_or_borrow`).
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionTokenDuplicateRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionTokenDuplicateRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<BufferCollectionTokenDuplicateRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        // Ordinal 1: `rights_attenuation_mask`.
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::Rights,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.rights_attenuation_mask
                .as_ref()
                .map(<fidl::Rights as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        // Ordinal 2: `token_request` (server end).
        if 2 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.token_request.as_mut().map(
                <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
19585
// Decodes a `BufferCollectionTokenDuplicateRequest` table. Known members in
// ordinal order: 1 = `rights_attenuation_mask`, 2 = `token_request` (server
// end); later unknown ordinals are skipped.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenDuplicateRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    // Safety contract is inherited from the `Decode` trait: `offset` must be
    // in-bounds and aligned for this type within the decoder's buffer.
    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        // Ordinal 1: `rights_attenuation_mask`.
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <fidl::Rights as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            // The envelope's inline bit must agree with the member's size.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.rights_attenuation_mask.get_or_insert_with(|| {
                fidl::new_empty!(fidl::Rights, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                fidl::Rights,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // Verify the envelope's declared byte/handle counts against what
            // was actually consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        // Ordinal 2: `token_request` (server end).
        next_offset += envelope_size;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 2 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.token_request.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
19745
19746 impl BufferCollectionTokenGroupCreateChildRequest {
19747 #[inline(always)]
19748 fn max_ordinal_present(&self) -> u64 {
19749 if let Some(_) = self.rights_attenuation_mask {
19750 return 2;
19751 }
19752 if let Some(_) = self.token_request {
19753 return 1;
19754 }
19755 0
19756 }
19757 }
19758
// Resource-type marker: the value is borrowed as `&mut Self` so encoding can
// move the `token_request` server-end handle out of the table.
impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenGroupCreateChildRequest {
    type Borrowed<'a> = &'a mut Self;
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        value
    }
}
19767
// Wire-format layout metadata: a FIDL table occupies 16 inline bytes
// (its vector header) with 8-byte alignment.
unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenGroupCreateChildRequest {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
19781
// Encodes a `BufferCollectionTokenGroupCreateChildRequest` table. Members in
// ordinal order: 1 = `token_request` (server end; handle taken out via
// `take_or_borrow`), 2 = `rights_attenuation_mask` (value type, borrowed).
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionTokenGroupCreateChildRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionTokenGroupCreateChildRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<BufferCollectionTokenGroupCreateChildRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        // Ordinal 1: `token_request` (server end).
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.token_request.as_mut().map(
                <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        // Ordinal 2: `rights_attenuation_mask`.
        if 2 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::Rights,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.rights_attenuation_mask
                .as_ref()
                .map(<fidl::Rights as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
19874
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenGroupCreateChildRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    // Decodes this table from the FIDL wire format: reads the envelope-vector
    // header, then walks the envelopes in ascending ordinal order
    // (1 = token_request, 2 = rights_attenuation_mask), skipping unknown
    // ordinals so messages from newer peers still decode.
    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // A table's envelope vector is never nullable on the wire.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: token_request (server end of the new child token channel).
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Payloads of <= 4 bytes are stored inline in the envelope itself.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.token_request.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // The envelope header's byte/handle counts must match what decoding
            // actually consumed, otherwise the message is malformed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 2 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 2: rights_attenuation_mask (plain value, no handles).
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <fidl::Rights as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.rights_attenuation_mask.get_or_insert_with(|| {
                fidl::new_empty!(fidl::Rights, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                fidl::Rights,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
20034
20035 impl BufferCollectionTokenGroupCreateChildrenSyncResponse {
20036 #[inline(always)]
20037 fn max_ordinal_present(&self) -> u64 {
20038 if let Some(_) = self.tokens {
20039 return 1;
20040 }
20041 0
20042 }
20043 }
20044
20045 impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenGroupCreateChildrenSyncResponse {
20046 type Borrowed<'a> = &'a mut Self;
20047 fn take_or_borrow<'a>(
20048 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20049 ) -> Self::Borrowed<'a> {
20050 value
20051 }
20052 }
20053
20054 unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenGroupCreateChildrenSyncResponse {
20055 type Owned = Self;
20056
20057 #[inline(always)]
20058 fn inline_align(_context: fidl::encoding::Context) -> usize {
20059 8
20060 }
20061
20062 #[inline(always)]
20063 fn inline_size(_context: fidl::encoding::Context) -> usize {
20064 16
20065 }
20066 }
20067
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionTokenGroupCreateChildrenSyncResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionTokenGroupCreateChildrenSyncResponse
{
    // Encodes this table as a FIDL envelope vector: a 16-byte inline header
    // (max ordinal + presence marker) followed by one 8-byte envelope per
    // ordinal, out of line. The single member is `tokens` (ordinal 1).
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder
            .debug_check_bounds::<BufferCollectionTokenGroupCreateChildrenSyncResponse>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // `take_or_borrow` yields a `&mut` so the channel handles can be
        // transferred out of `self` as the vector is written.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Vector<
                fidl::encoding::Endpoint<
                    fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                >,
                64,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.tokens.as_mut().map(
                <fidl::encoding::Vector<
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    64,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
20141
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenGroupCreateChildrenSyncResponse
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    // Decodes this table from the FIDL wire format. The single known member is
    // `tokens` (ordinal 1, bounded vector of client ends, max 64); envelopes
    // with higher ordinals are skipped as unknown for forward compatibility.
    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // A table's envelope vector is never nullable on the wire.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: tokens.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Vector<
                fidl::encoding::Endpoint<
                    fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                >,
                64,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Payloads of <= 4 bytes are stored inline in the envelope itself.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.tokens.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Vector<
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                        >,
                        64,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Vector<
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    64,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // The envelope header's byte/handle counts must match what decoding
            // actually consumed, otherwise the message is malformed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
20258
20259 impl BufferCollectionTokenDuplicateSyncResponse {
20260 #[inline(always)]
20261 fn max_ordinal_present(&self) -> u64 {
20262 if let Some(_) = self.tokens {
20263 return 1;
20264 }
20265 0
20266 }
20267 }
20268
20269 impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenDuplicateSyncResponse {
20270 type Borrowed<'a> = &'a mut Self;
20271 fn take_or_borrow<'a>(
20272 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20273 ) -> Self::Borrowed<'a> {
20274 value
20275 }
20276 }
20277
20278 unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenDuplicateSyncResponse {
20279 type Owned = Self;
20280
20281 #[inline(always)]
20282 fn inline_align(_context: fidl::encoding::Context) -> usize {
20283 8
20284 }
20285
20286 #[inline(always)]
20287 fn inline_size(_context: fidl::encoding::Context) -> usize {
20288 16
20289 }
20290 }
20291
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionTokenDuplicateSyncResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionTokenDuplicateSyncResponse
{
    // Encodes this table as a FIDL envelope vector: a 16-byte inline header
    // (max ordinal + presence marker) followed by one 8-byte envelope per
    // ordinal, out of line. The single member is `tokens` (ordinal 1).
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<BufferCollectionTokenDuplicateSyncResponse>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // `take_or_borrow` yields a `&mut` so the channel handles can be
        // transferred out of `self` as the vector is written.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Vector<
                fidl::encoding::Endpoint<
                    fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                >,
                64,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.tokens.as_mut().map(
                <fidl::encoding::Vector<
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    64,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
20364
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenDuplicateSyncResponse
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    // Decodes this table from the FIDL wire format. The single known member is
    // `tokens` (ordinal 1, bounded vector of client ends, max 64); envelopes
    // with higher ordinals are skipped as unknown for forward compatibility.
    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // A table's envelope vector is never nullable on the wire.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: tokens.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Vector<
                fidl::encoding::Endpoint<
                    fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                >,
                64,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Payloads of <= 4 bytes are stored inline in the envelope itself.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.tokens.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Vector<
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                        >,
                        64,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Vector<
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    64,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // The envelope header's byte/handle counts must match what decoding
            // actually consumed, otherwise the message is malformed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
20481
20482 impl BufferCollectionWaitForAllBuffersAllocatedResponse {
20483 #[inline(always)]
20484 fn max_ordinal_present(&self) -> u64 {
20485 if let Some(_) = self.buffer_collection_info {
20486 return 1;
20487 }
20488 0
20489 }
20490 }
20491
20492 impl fidl::encoding::ResourceTypeMarker for BufferCollectionWaitForAllBuffersAllocatedResponse {
20493 type Borrowed<'a> = &'a mut Self;
20494 fn take_or_borrow<'a>(
20495 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20496 ) -> Self::Borrowed<'a> {
20497 value
20498 }
20499 }
20500
20501 unsafe impl fidl::encoding::TypeMarker for BufferCollectionWaitForAllBuffersAllocatedResponse {
20502 type Owned = Self;
20503
20504 #[inline(always)]
20505 fn inline_align(_context: fidl::encoding::Context) -> usize {
20506 8
20507 }
20508
20509 #[inline(always)]
20510 fn inline_size(_context: fidl::encoding::Context) -> usize {
20511 16
20512 }
20513 }
20514
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionWaitForAllBuffersAllocatedResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionWaitForAllBuffersAllocatedResponse
{
    // Encodes this table as a FIDL envelope vector: a 16-byte inline header
    // (max ordinal + presence marker) followed by one 8-byte envelope per
    // ordinal, out of line. The single member is `buffer_collection_info`
    // (ordinal 1).
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder
            .debug_check_bounds::<BufferCollectionWaitForAllBuffersAllocatedResponse>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // `take_or_borrow` yields a `&mut` so any handles inside the info
        // table can be transferred out of `self` as it is written.
        fidl::encoding::encode_in_envelope_optional::<
            BufferCollectionInfo,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.buffer_collection_info.as_mut().map(
                <BufferCollectionInfo as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
20578
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionWaitForAllBuffersAllocatedResponse
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    // Decodes this table from the FIDL wire format. The single known member is
    // `buffer_collection_info` (ordinal 1); envelopes with higher ordinals are
    // skipped as unknown for forward compatibility.
    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // A table's envelope vector is never nullable on the wire.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: buffer_collection_info.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <BufferCollectionInfo as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context,
                );
            // Payloads of <= 4 bytes are stored inline in the envelope itself.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.buffer_collection_info.get_or_insert_with(|| {
                fidl::new_empty!(
                    BufferCollectionInfo,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                BufferCollectionInfo,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // The envelope header's byte/handle counts must match what decoding
            // actually consumed, otherwise the message is malformed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
20681
20682 impl NodeAttachNodeTrackingRequest {
20683 #[inline(always)]
20684 fn max_ordinal_present(&self) -> u64 {
20685 if let Some(_) = self.server_end {
20686 return 1;
20687 }
20688 0
20689 }
20690 }
20691
20692 impl fidl::encoding::ResourceTypeMarker for NodeAttachNodeTrackingRequest {
20693 type Borrowed<'a> = &'a mut Self;
20694 fn take_or_borrow<'a>(
20695 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20696 ) -> Self::Borrowed<'a> {
20697 value
20698 }
20699 }
20700
20701 unsafe impl fidl::encoding::TypeMarker for NodeAttachNodeTrackingRequest {
20702 type Owned = Self;
20703
20704 #[inline(always)]
20705 fn inline_align(_context: fidl::encoding::Context) -> usize {
20706 8
20707 }
20708
20709 #[inline(always)]
20710 fn inline_size(_context: fidl::encoding::Context) -> usize {
20711 16
20712 }
20713 }
20714
unsafe impl
    fidl::encoding::Encode<
        NodeAttachNodeTrackingRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut NodeAttachNodeTrackingRequest
{
    // Encodes this table as a FIDL envelope vector: a 16-byte inline header
    // (max ordinal + presence marker) followed by one 8-byte envelope per
    // ordinal, out of line. The single member is `server_end` (ordinal 1),
    // an eventpair handle.
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<NodeAttachNodeTrackingRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // NOTE(review): rights mask 2147483648 is 1 << 31 — presumably
        // SAME_RIGHTS; confirm against the fidl crate's rights constants.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::EventPair,
                { fidl::ObjectType::EVENTPAIR.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.server_end.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
20785
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for NodeAttachNodeTrackingRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    // Decodes this table from the FIDL wire format. The single known member is
    // `server_end` (ordinal 1, an eventpair handle); envelopes with higher
    // ordinals are skipped as unknown for forward compatibility.
    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // A table's envelope vector is never nullable on the wire.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: server_end.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::HandleType<
                fidl::EventPair,
                { fidl::ObjectType::EVENTPAIR.into_raw() },
                2147483648,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Payloads of <= 4 bytes are stored inline in the envelope itself.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.server_end.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            // The envelope header's byte/handle counts must match what decoding
            // actually consumed, otherwise the message is malformed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
20880
20881 impl NodeIsAlternateForRequest {
20882 #[inline(always)]
20883 fn max_ordinal_present(&self) -> u64 {
20884 if let Some(_) = self.node_ref {
20885 return 1;
20886 }
20887 0
20888 }
20889 }
20890
20891 impl fidl::encoding::ResourceTypeMarker for NodeIsAlternateForRequest {
20892 type Borrowed<'a> = &'a mut Self;
20893 fn take_or_borrow<'a>(
20894 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20895 ) -> Self::Borrowed<'a> {
20896 value
20897 }
20898 }
20899
// Wire-format metadata for `NodeIsAlternateForRequest`. As a FIDL table its
// inline representation is a 16-byte, 8-byte-aligned vector header (ordinal
// count + presence marker); all envelopes live out of line.
unsafe impl fidl::encoding::TypeMarker for NodeIsAlternateForRequest {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
20913
// Encodes `NodeIsAlternateForRequest` as a FIDL table: vector header inline,
// then one 8-byte envelope per ordinal up to `max_ordinal_present()`.
// Encoding from `&mut` because the handle member is moved out of `self`.
unsafe impl
    fidl::encoding::Encode<
        NodeIsAlternateForRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut NodeIsAlternateForRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<NodeIsAlternateForRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // Ordinal 1: `node_ref` (event handle). `take_or_borrow` presumably
        // moves the handle out of `self` — handles are not duplicated on encode.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::Event,
                { fidl::ObjectType::EVENT.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.node_ref.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::Event,
                    { fidl::ObjectType::EVENT.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
20984
// Decodes the `NodeIsAlternateForRequest` FIDL table: reads the vector
// header, then the ordinal-1 envelope (`node_ref`), skipping any unknown
// envelopes for forward compatibility.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for NodeIsAlternateForRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // `len` is the highest ordinal the sender encoded.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: `node_ref` (event handle).
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::HandleType<
                fidl::Event,
                { fidl::ObjectType::EVENT.into_raw() },
                2147483648,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.node_ref.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            // Verify the envelope's declared byte/handle counts were consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
21079
21080 impl NodeSetWeakOkRequest {
21081 #[inline(always)]
21082 fn max_ordinal_present(&self) -> u64 {
21083 if let Some(_) = self.for_child_nodes_also {
21084 return 1;
21085 }
21086 0
21087 }
21088 }
21089
21090 impl fidl::encoding::ResourceTypeMarker for NodeSetWeakOkRequest {
21091 type Borrowed<'a> = &'a mut Self;
21092 fn take_or_borrow<'a>(
21093 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
21094 ) -> Self::Borrowed<'a> {
21095 value
21096 }
21097 }
21098
// Wire-format metadata for `NodeSetWeakOkRequest`. As a FIDL table its inline
// representation is a 16-byte, 8-byte-aligned vector header (ordinal count +
// presence marker); all envelopes live out of line.
unsafe impl fidl::encoding::TypeMarker for NodeSetWeakOkRequest {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
21112
// Encodes `NodeSetWeakOkRequest` as a FIDL table: vector header inline, then
// one envelope for the single `for_child_nodes_also` field. The only member
// is a plain value (bool), so encoding borrows rather than consumes it.
unsafe impl
    fidl::encoding::Encode<NodeSetWeakOkRequest, fidl::encoding::DefaultFuchsiaResourceDialect>
    for &mut NodeSetWeakOkRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<NodeSetWeakOkRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // Ordinal 1: `for_child_nodes_also` (bool, borrowed by value marker).
        fidl::encoding::encode_in_envelope_optional::<
            bool,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.for_child_nodes_also
                .as_ref()
                .map(<bool as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
21173
// Decodes the `NodeSetWeakOkRequest` FIDL table: reads the vector header,
// then the ordinal-1 envelope (`for_child_nodes_also`), skipping any unknown
// envelopes for forward compatibility.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for NodeSetWeakOkRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // `len` is the highest ordinal the sender encoded.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: `for_child_nodes_also` (bool).
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <bool as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.for_child_nodes_also.get_or_insert_with(|| {
                fidl::new_empty!(bool, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                bool,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // Verify the envelope's declared byte/handle counts were consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
21271
21272 impl NodeGetNodeRefResponse {
21273 #[inline(always)]
21274 fn max_ordinal_present(&self) -> u64 {
21275 if let Some(_) = self.node_ref {
21276 return 1;
21277 }
21278 0
21279 }
21280 }
21281
21282 impl fidl::encoding::ResourceTypeMarker for NodeGetNodeRefResponse {
21283 type Borrowed<'a> = &'a mut Self;
21284 fn take_or_borrow<'a>(
21285 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
21286 ) -> Self::Borrowed<'a> {
21287 value
21288 }
21289 }
21290
// Wire-format metadata for `NodeGetNodeRefResponse`. As a FIDL table its
// inline representation is a 16-byte, 8-byte-aligned vector header (ordinal
// count + presence marker); all envelopes live out of line.
unsafe impl fidl::encoding::TypeMarker for NodeGetNodeRefResponse {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
21304
// Encodes `NodeGetNodeRefResponse` as a FIDL table: vector header inline,
// then one envelope for `node_ref`. Encoding from `&mut` because the handle
// member is moved out of `self`.
unsafe impl
    fidl::encoding::Encode<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut NodeGetNodeRefResponse
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<NodeGetNodeRefResponse>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // Ordinal 1: `node_ref` (event handle). `take_or_borrow` presumably
        // moves the handle out of `self` — handles are not duplicated on encode.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::Event,
                { fidl::ObjectType::EVENT.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.node_ref.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::Event,
                    { fidl::ObjectType::EVENT.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
21375
// Decodes the `NodeGetNodeRefResponse` FIDL table: reads the vector header,
// then the ordinal-1 envelope (`node_ref`), skipping any unknown envelopes
// for forward compatibility.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for NodeGetNodeRefResponse
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // `len` is the highest ordinal the sender encoded.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: `node_ref` (event handle).
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::HandleType<
                fidl::Event,
                { fidl::ObjectType::EVENT.into_raw() },
                2147483648,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.node_ref.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            // Verify the envelope's declared byte/handle counts were consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
21470
21471 impl VmoBuffer {
21472 #[inline(always)]
21473 fn max_ordinal_present(&self) -> u64 {
21474 if let Some(_) = self.close_weak_asap {
21475 return 3;
21476 }
21477 if let Some(_) = self.vmo_usable_start {
21478 return 2;
21479 }
21480 if let Some(_) = self.vmo {
21481 return 1;
21482 }
21483 0
21484 }
21485 }
21486
21487 impl fidl::encoding::ResourceTypeMarker for VmoBuffer {
21488 type Borrowed<'a> = &'a mut Self;
21489 fn take_or_borrow<'a>(
21490 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
21491 ) -> Self::Borrowed<'a> {
21492 value
21493 }
21494 }
21495
// Wire-format metadata for `VmoBuffer`. As a FIDL table its inline
// representation is a 16-byte, 8-byte-aligned vector header (ordinal count +
// presence marker); all envelopes live out of line.
unsafe impl fidl::encoding::TypeMarker for VmoBuffer {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
21509
// Encodes `VmoBuffer` as a FIDL table with up to three envelopes:
// ordinal 1 `vmo` (VMO handle), ordinal 2 `vmo_usable_start` (u64),
// ordinal 3 `close_weak_asap` (eventpair handle). Encoding stops after the
// highest present ordinal; handle members are moved out of `self`.
unsafe impl fidl::encoding::Encode<VmoBuffer, fidl::encoding::DefaultFuchsiaResourceDialect>
    for &mut VmoBuffer
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<VmoBuffer>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // Ordinal 1: `vmo` (VMO handle, moved out of `self`).
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::Vmo,
                { fidl::ObjectType::VMO.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.vmo.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::Vmo,
                    { fidl::ObjectType::VMO.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 2 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // Ordinal 2: `vmo_usable_start` (u64 value, borrowed).
        fidl::encoding::encode_in_envelope_optional::<
            u64,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.vmo_usable_start
                .as_ref()
                .map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 3 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (3 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // Ordinal 3: `close_weak_asap` (eventpair handle, moved out of `self`).
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::EventPair,
                { fidl::ObjectType::EVENTPAIR.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.close_weak_asap.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
21641
// Decodes the `VmoBuffer` FIDL table: reads the vector header, then up to
// three envelopes in ordinal order — 1 `vmo` (VMO handle), 2
// `vmo_usable_start` (u64), 3 `close_weak_asap` (eventpair handle) —
// skipping unknown envelopes for forward compatibility.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect> for VmoBuffer {
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // `len` is the highest ordinal the sender encoded.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: `vmo` (VMO handle).
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::HandleType<
                fidl::Vmo,
                { fidl::ObjectType::VMO.into_raw() },
                2147483648,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.vmo.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 2 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 2: `vmo_usable_start` (u64).
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.vmo_usable_start.get_or_insert_with(|| {
                fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 3 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 3: `close_weak_asap` (eventpair handle).
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::HandleType<
                fidl::EventPair,
                { fidl::ObjectType::EVENTPAIR.into_raw() },
                2147483648,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.close_weak_asap.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
21835}