Skip to main content

starnix_modules_fastrpc/
fastrpc.rs

1// Copyright 2025 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use crate::canonicalize_ioctl_request;
6use crate::dma_heap::{Alloc, dma_heap_device_register};
7use bitfield::bitfield;
8use bstr::ByteSlice;
9use fidl_fuchsia_hardware_qualcomm_fastrpc as frpc;
10use starnix_core::device::DeviceOps;
11use starnix_core::mm::memory::MemoryObject;
12use starnix_core::mm::{MemoryAccessor, MemoryAccessorExt, ProtectionFlags};
13use starnix_core::task::{CurrentTask, ThreadGroupKey};
14use starnix_core::vfs::{
15    Anon, FdFlags, FdNumber, FileObject, FileObjectState, FileOps, NamespaceNode, default_ioctl,
16};
17use starnix_core::{
18    fileops_impl_dataless, fileops_impl_memory, fileops_impl_noop_sync, fileops_impl_seekless,
19};
20use starnix_logging::{log_debug, log_error, log_warn};
21use starnix_sync::{FastrpcInnerState, FileOpsCore, Locked, OrderedMutex, Unlocked};
22use starnix_syscalls::{SUCCESS, SyscallArg, SyscallResult};
23use starnix_types::user_buffer::UserBuffer;
24use starnix_uapi::device_type::DeviceType;
25use starnix_uapi::errors::{Errno, ErrnoCode};
26use starnix_uapi::open_flags::OpenFlags;
27use starnix_uapi::user_address::{MultiArchUserRef, UserCString, UserRef};
28use starnix_uapi::{errno, error};
29use std::collections::VecDeque;
30use std::sync::atomic::{AtomicI64, Ordering};
31use std::sync::{Arc, OnceLock};
32use zx::HandleBased;
33
// User-space pointers to the fastrpc ioctl structs. Each alias pairs the
// native layout with the arch32 (compat) layout so reads via
// read_multi_arch_object(s) work for both 64-bit and 32-bit callers.
type IoctlInvokeFdPtr = MultiArchUserRef<
    linux_uapi::fastrpc_ioctl_invoke_fd,
    linux_uapi::arch32::fastrpc_ioctl_invoke_fd,
>;

type IoctlInvoke2Ptr =
    MultiArchUserRef<linux_uapi::fastrpc_ioctl_invoke2, linux_uapi::arch32::fastrpc_ioctl_invoke2>;

type IoctlInitPtr =
    MultiArchUserRef<linux_uapi::fastrpc_ioctl_init, linux_uapi::arch32::fastrpc_ioctl_init>;

type IoctlInvokePtr =
    MultiArchUserRef<linux_uapi::fastrpc_ioctl_invoke, linux_uapi::arch32::fastrpc_ioctl_invoke>;

// Pointer to one element of the caller's remote argument-buffer array.
type RemoteBufPtr = MultiArchUserRef<linux_uapi::remote_buf, linux_uapi::arch32::remote_buf>;
49
// The first 256 capability attributes are reported by the DSP; the remaining
// 4 are kernel-level capabilities answered locally (see KERNEL_CAPABILITIES).
const FASTRPC_MAX_DSP_ATTRIBUTES: usize = 256;
const FASTRPC_MAX_ATTRIBUTES: usize = 260;

// Performance data capability not supported.
const PERF_CAPABILITY_SUPPORT: u32 = 0;

// Newer error version.
const KERNEL_ERROR_CODE_V1_SUPPORT: u32 = 0;

// Userspace allocation supported through dma-heap.
const USERSPACE_ALLOCATION_SUPPORT: u32 = 1;

// No signaling support.
const DSPSIGNAL_SUPPORT: u32 = 0;

// Kernel-side capability values, indexed by (attr - FASTRPC_MAX_DSP_ATTRIBUTES).
const KERNEL_CAPABILITIES: [u32; FASTRPC_MAX_ATTRIBUTES - FASTRPC_MAX_DSP_ATTRIBUTES] = [
    PERF_CAPABILITY_SUPPORT,
    KERNEL_ERROR_CODE_V1_SUPPORT,
    USERSPACE_ALLOCATION_SUPPORT,
    DSPSIGNAL_SUPPORT,
];

// DSP attribute indices that are logged after a capability fetch.
const ASYNC_FASTRPC_CAP: usize = 9;
const DMA_HANDLE_REVERSE_RPC_CAP: usize = 129;

// Highest request number accepted by FASTRPC_IOCTL_INVOKE2.
const INVOKE2_MAX: u32 = 4;

// Init request types — presumably mirror the Linux fastrpc uapi values;
// TODO confirm against the init handling (not visible in this chunk).
const FASTRPC_INIT_ATTACH: u32 = 0;
const FASTRPC_INIT_CREATE_STATIC: u32 = 2;

// Size limits for init file/memory payloads — TODO confirm where enforced.
const INIT_FILELEN_MAX: u32 = 2 * 1024 * 1024;
const INIT_MEMLEN_MAX: u32 = 8 * 1024 * 1024;
82
83// Scalars:
84// These are how we designate the number of various elements inside an rpc method.
85// It comes in a u32 with the bit format:
86//
87// aaam mmmm    iiii iiii    oooo oooo    xxxx yyyy
88//
89// a = attribute (3 bits)
90// m = method (5 bits)
91// i = inbuf (8 bits)
92// o = outbuf (8 bits)
93// x = in handle (4 bits)
94// y = out handle (4 bits)
95//
96// Currently we only support buffers and not handles in this implementation.
bitfield! {
    // Bit layout per the diagram above: aaam mmmm  iiii iiii  oooo oooo  xxxx yyyy.
    pub struct Scalar(u32);
    impl Debug;

    // Method id to invoke on the remote handle. The 3 attribute bits (31:29)
    // are not exposed by this bitfield.
    pub method_id, _: 28, 24;
    pub inbuffs, _: 23, 16;
    pub outbuffs, _: 15, 8;
    pub inhandles, _: 7, 4;
    pub outhandles, _: 3, 0;
}
107
108impl Scalar {
109    fn len(&self) -> u32 {
110        self.inbuffs() as u32
111            + self.outbuffs() as u32
112            + self.inhandles() as u32
113            + self.outhandles() as u32
114    }
115}
116
117// All fidl transport errors should be considered as error, and converted to IO error.
118fn fidl_error_to_errno(info: &str, error: fidl::Error) -> starnix_uapi::errors::Errno {
119    if !error.is_closed() {
120        log_error!("{}: {:?}", info, error);
121        return errno!(EIO);
122    }
123
124    // Log at most once every 5 seconds for PEER_CLOSED errors which can spam if the driver
125    // has crashed.
126    static LAST_LOG_TIME: AtomicI64 = AtomicI64::new(0);
127    let now = zx::MonotonicInstant::get().into_nanos();
128    let last = LAST_LOG_TIME.load(Ordering::Relaxed);
129    if now - last > 5_000_000_000 {
130        LAST_LOG_TIME.store(now, Ordering::Relaxed);
131        log_error!("{}: {:?}", info, error);
132    }
133    errno!(EIO)
134}
135
// zx.Status errors from fidl domain errors can be converted into fdio-like errnos.
// `error` is the raw i32 status value carried in the fidl method result.
fn zx_i32_to_errno(info: &str, error: i32) -> starnix_uapi::errors::Errno {
    starnix_uapi::from_status_like_fdio!(zx::Status::from_raw(error), info)
}

// zx.Status errors from syscalls can be converted into fdio-like errnos.
fn zx_status_to_errno(info: &str, error: zx::Status) -> starnix_uapi::errors::Errno {
    starnix_uapi::from_status_like_fdio!(error, info)
}

// Directly passthrough retval errors from the driver to the user.
// The driver's retval is treated as a Linux-style negative-errno return value.
fn retval_i32_to_errno(info: &str, error: i32) -> starnix_uapi::errors::Errno {
    let code = ErrnoCode::from_return_value(error as u64);
    log_debug!("{}: {:?}", info, code);
    Errno::with_context(code, info)
}
152
153fn fastrpc_align(size: u64) -> Result<u64, Errno> {
154    // 128 is the memory alignment within the fastrpc framework.
155    size.checked_next_multiple_of(128).ok_or_else(|| errno!(EOVERFLOW))
156}
157
/// A dma-buf-style file backed by a single VMO.
///
/// Instances are created by the dma-heap allocator (`SystemHeap::alloc`); the
/// fastrpc INVOKE_FD path downcasts fds to this type to obtain their VMOs.
struct DmaBufFile {
    memory: Arc<MemoryObject>,
}

impl DmaBufFile {
    // Boxed so it can be handed directly to Anon::new_private_file as FileOps.
    fn new(memory: Arc<MemoryObject>) -> Box<Self> {
        Box::new(Self { memory })
    }
}
167
impl FileOps for DmaBufFile {
    // Reads/writes/mmap are served straight from the backing memory object.
    fileops_impl_memory!(self, &self.memory);
    fileops_impl_noop_sync!();

    /// Handles DMA_BUF_SET_NAME_B by applying the user-supplied name to the
    /// backing VMO; every other request falls through to default handling.
    fn ioctl(
        &self,
        locked: &mut Locked<Unlocked>,
        file: &FileObject,
        current_task: &CurrentTask,
        request: u32,
        arg: SyscallArg,
    ) -> Result<SyscallResult, Errno> {
        match canonicalize_ioctl_request(current_task, request) {
            linux_uapi::DMA_BUF_SET_NAME_B => {
                // Read at most DMA_BUF_NAME_LEN bytes of NUL-terminated name.
                let name = current_task.read_c_string_to_vec(
                    UserCString::new(current_task, arg),
                    linux_uapi::DMA_BUF_NAME_LEN as usize,
                )?;
                log_debug!(
                    "dma buf file with koid {:?} got ioctl set name: {}",
                    self.memory.get_koid(),
                    name
                );
                self.memory.set_zx_name(&name);
                Ok(SUCCESS)
            }
            _ => default_ioctl(file, locked, current_task, request, arg),
        }
    }
}
198
/// The dma-heap backend: satisfies allocations by asking the fastrpc driver
/// for VMOs and wrapping them in `DmaBufFile`s.
struct SystemHeap {
    device: Arc<frpc::SecureFastRpcSynchronousProxy>,
}

impl Alloc for SystemHeap {
    /// Allocates a `size`-byte buffer from the driver and installs it in the
    /// calling task's fd table as an anonymous dma-buf file opened RDWR.
    fn alloc(
        &self,
        locked: &mut Locked<Unlocked>,
        current_task: &CurrentTask,
        size: u64,
        fd_flags: FdFlags,
    ) -> Result<FdNumber, Errno> {
        // Transport errors become EIO; driver status codes map like fdio errnos.
        let vmo = self
            .device
            .allocate(size, zx::MonotonicInstant::INFINITE)
            .map_err(|e| fidl_error_to_errno("allocate call", e))?
            .map_err(|e| zx_i32_to_errno("allocate", e))?;

        log_debug!("allocated vmo with koid {:?}", vmo.koid());

        let memory = Arc::new(MemoryObject::from(vmo));

        let file = Anon::new_private_file(
            locked,
            current_task,
            DmaBufFile::new(memory),
            OpenFlags::RDWR,
            "[fastrpc:buffer]",
        );

        current_task.add_file(locked, file, fd_flags)
    }
}
232
/// Mutable per-file state, guarded by `FastRPCFile::inner_state`.
#[derive(Default)]
struct FastRPCFileState {
    // Remote-domain session used for invokes; None until established.
    session: Option<Arc<frpc::RemoteDomainSynchronousProxy>>,
    // Pool of shared payload buffers; invoke pops one and returns it when done.
    payload_vmos: VecDeque<frpc::SharedPayloadBuffer>,
    // Channel id recorded by FASTRPC_IOCTL_GETINFO.
    cid: Option<i32>,
    // Thread group that performed FASTRPC_IOCTL_GETINFO on this file.
    pid: Option<ThreadGroupKey>,
}
240
/// The decoded form of an invoke ioctl: the raw invoke struct, its unpacked
/// scalar, and — for FASTRPC_IOCTL_INVOKE_FD only — VMOs duplicated from the
/// caller's dma-buf fds (None entries mark non-mapped arguments).
struct ParsedInvoke {
    invoke: linux_uapi::fastrpc_ioctl_invoke,
    scalar: Scalar,
    fd_vmos: Option<Vec<Option<zx::Vmo>>>,
}
246
/// A remote buffer annotated with its contribution to the merged payload
/// layout (computed by `merge_buffers`).
#[derive(PartialEq)]
struct BufferWithMergeInfo {
    // User-address range [start, end) of the buffer.
    start: u64,
    end: u64,
    // Index of this buffer in the caller's original remote_buf order.
    buffer_index: usize,
    // Bytes of unique payload memory this buffer adds beyond earlier overlap.
    merge_contribution: u64,
    // Negative offset from the current merge end back to this buffer's start.
    merge_offset: u64,
}
255
256impl std::fmt::Debug for BufferWithMergeInfo {
257    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
258        f.debug_struct("BufferWithMergeInfo")
259            .field("start", &format_args!("{:#x}", self.start))
260            .field("end", &format_args!("{:#x}", self.end))
261            .field("buffer_index", &self.buffer_index)
262            .field("merge_contribution", &format_args!("{:#x}", self.merge_contribution))
263            .field("merge_offset", &self.merge_offset)
264            .finish()
265    }
266}
267
/// Bookkeeping for one output argument so its data can be copied back to the
/// caller after a successful invoke (see `process_out_bufs`).
struct OutputArgumentInfo {
    // True when the argument was passed as a VMO; such outputs are skipped
    // during copy-back.
    mapped: bool,
    // Offset of this argument's data (payload-VMO offset for unmapped args,
    // mapping offset for mapped args).
    offset: u64,
    // Length of the argument in bytes.
    length: u64,
}
273
/// Everything needed to issue a driver invoke: the claimed payload buffer
/// (None when all data is mapped), the in/out argument lists for the fidl
/// call, and per-output copy-back info.
struct PayloadInformation {
    payload_buffer: Option<frpc::SharedPayloadBuffer>,
    input_args: Vec<frpc::ArgumentEntry>,
    output_args: Vec<frpc::ArgumentEntry>,
    output_info: Vec<OutputArgumentInfo>,
}
280
/// An open fastrpc device file. Ioctls are restricted to the thread group
/// that opened the file (checked in `FileOps::ioctl`).
struct FastRPCFile {
    // Thread group that opened the device node.
    pid_open: ThreadGroupKey,
    device: Arc<frpc::SecureFastRpcSynchronousProxy>,
    // DSP capability table, fetched from the driver once and then shared.
    cached_capabilities: Arc<OnceLock<[u32; FASTRPC_MAX_DSP_ATTRIBUTES]>>,
    inner_state: OrderedMutex<FastRPCFileState, FastrpcInnerState>,
}
287
288impl FastRPCFile {
289    fn new(
290        pid_open: ThreadGroupKey,
291        device: Arc<frpc::SecureFastRpcSynchronousProxy>,
292        cached_capabilities: Arc<OnceLock<[u32; FASTRPC_MAX_DSP_ATTRIBUTES]>>,
293    ) -> Self {
294        Self {
295            pid_open,
296            device,
297            cached_capabilities,
298            inner_state: OrderedMutex::new(FastRPCFileState::default()),
299        }
300    }
301
    /// Handles FASTRPC_IOCTL_INVOKE / FASTRPC_IOCTL_INVOKE_FD: marshals the
    /// caller's argument buffers into a shared payload VMO (and/or mapped
    /// dma-buf VMOs), performs the synchronous driver invoke, then copies
    /// output data back to user memory.
    fn invoke(
        &self,
        current_task: &CurrentTask,
        locked: &mut Locked<Unlocked>,
        request: u32,
        arg: SyscallArg,
    ) -> Result<SyscallResult, Errno> {
        let parsed_invoke = Self::parse_invoke_request(current_task, request, arg)?;
        let ParsedInvoke { invoke: info, scalar, mut fd_vmos } = parsed_invoke;

        log_debug!(
            "FastRPC ioctl invoke, scalar {} ({}, {}, {}), handle {}",
            info.sc,
            scalar.method_id(),
            scalar.inbuffs(),
            scalar.outbuffs(),
            info.handle
        );

        // Total argument count (buffers + handles); input buffers come first
        // in the caller's remote_buf array.
        let length = scalar.len();
        let inbufs = scalar.inbuffs() as u32;

        // Only buffer arguments are implemented; see the Scalar comment above.
        if scalar.inhandles() != 0 || scalar.outhandles() != 0 {
            log_error!("handles in scalar not supported.");
            return error!(ENOSYS);
        }

        let remote_bufs = current_task.read_multi_arch_objects_to_vec(
            RemoteBufPtr::new(current_task, info.pra),
            length as usize,
        )?;
        // Coalesce overlapping user buffers, then lay them out in a payload
        // buffer (popped from the pool) with input data copied in.
        let merged_buffers = Self::merge_buffers(&fd_vmos, &remote_bufs)?;
        let payload = Self::get_payload_info(
            current_task,
            locked,
            &self.inner_state,
            &merged_buffers,
            &remote_bufs,
            &mut fd_vmos,
            inbufs,
        )?;

        // Id 0 signals that no shared payload buffer is used for this invoke.
        let payload_buffer_id = match &payload.payload_buffer {
            Some(buffer) => buffer.id,
            None => 0,
        };

        let session = self.get_session(locked)?;
        let invoke_res = session.invoke(
            current_task.get_tid(),
            info.handle,
            scalar.method_id() as u32,
            payload_buffer_id,
            payload.input_args,
            payload.output_args,
            zx::MonotonicInstant::INFINITE,
        );

        // On success, copy output data back to the caller; in all cases return
        // the payload buffer to the pool for reuse. Called exactly once below.
        let buffer_after_invoke = |success: bool| -> Result<(), Errno> {
            if success {
                if let Some(buffer) = &payload.payload_buffer {
                    self.process_out_bufs(
                        current_task,
                        &remote_bufs,
                        &buffer.vmo,
                        &payload.output_info,
                        inbufs,
                    )?;
                }
            }

            if let Some(buffer) = payload.payload_buffer {
                log_debug!("returning payload buffer {}", buffer.id);
                self.inner_state.lock(locked).payload_vmos.push_back(buffer);
            };

            Ok(())
        };

        match invoke_res {
            Ok(Ok(())) => {
                buffer_after_invoke(true)?;
                Ok(SUCCESS)
            }
            Ok(Err(e)) => {
                // Driver-reported retval is passed through to the caller.
                buffer_after_invoke(false)?;
                Err(retval_i32_to_errno("invoke", e))
            }
            Err(e) => {
                buffer_after_invoke(false)?;
                Err(fidl_error_to_errno("invoke call", e))
            }
        }
    }
396
397    fn get_session(
398        &self,
399        locked: &mut Locked<Unlocked>,
400    ) -> Result<Arc<frpc::RemoteDomainSynchronousProxy>, Errno> {
401        let inner = self.inner_state.lock(locked);
402        Ok(inner.session.as_ref().ok_or_else(|| errno!(ENOENT))?.clone())
403    }
404
405    fn get_capabilities_from_device(
406        &self,
407        _domain: u32,
408    ) -> Result<[u32; FASTRPC_MAX_DSP_ATTRIBUTES], Errno> {
409        let capabilities = self
410            .device
411            .get_capabilities(zx::MonotonicInstant::INFINITE)
412            .map_err(|e| fidl_error_to_errno("get_capabilities call", e))?
413            .map_err(|e| retval_i32_to_errno("get_capabilities", e))?;
414
415        let mut res: [u32; FASTRPC_MAX_DSP_ATTRIBUTES] = [0; FASTRPC_MAX_DSP_ATTRIBUTES];
416        let attribute_buffer_length = FASTRPC_MAX_DSP_ATTRIBUTES - 1;
417        // 0th capability is not filled by the driver.
418        res[0] = 0;
419        res[1..(attribute_buffer_length + 1)]
420            .copy_from_slice(&capabilities[..attribute_buffer_length]);
421
422        log_debug!("ASYNC_FASTRPC_CAP: {}", res[ASYNC_FASTRPC_CAP]);
423        log_debug!("DMA_HANDLE_REVERSE_RPC_CAP: {}", res[DMA_HANDLE_REVERSE_RPC_CAP]);
424        Ok(res)
425    }
426
427    fn get_capabilities(&self, domain: u32, attr: usize) -> Result<u32, Errno> {
428        if attr >= FASTRPC_MAX_ATTRIBUTES {
429            return error!(EOVERFLOW);
430        }
431
432        if attr >= FASTRPC_MAX_DSP_ATTRIBUTES {
433            return Ok(KERNEL_CAPABILITIES[(attr) - FASTRPC_MAX_DSP_ATTRIBUTES]);
434        }
435
436        // OnceLock's get_or_try_init is a nightly feature so we end up with this which might call
437        // get_capabilities_from_device unnecessarily.
438        let caps = self.cached_capabilities.get();
439        match caps {
440            Some(caps) => Ok(caps[attr]),
441            None => {
442                let from_device = self.get_capabilities_from_device(domain)?;
443                let caps = self.cached_capabilities.get_or_init(|| from_device);
444                Ok(caps[attr])
445            }
446        }
447    }
448
    /// Decodes an invoke ioctl argument into a `ParsedInvoke`.
    ///
    /// For FASTRPC_IOCTL_INVOKE_FD the caller also supplies one fd per
    /// argument; positive fds must refer to dma-buf files allocated here, and
    /// their VMO handles are duplicated for the driver call.
    fn parse_invoke_request(
        current_task: &CurrentTask,
        request: u32,
        arg: SyscallArg,
    ) -> Result<ParsedInvoke, Errno> {
        match canonicalize_ioctl_request(current_task, request) {
            linux_uapi::FASTRPC_IOCTL_INVOKE_FD => {
                let info = current_task
                    .read_multi_arch_object(IoctlInvokeFdPtr::new(current_task, arg))?;
                log_debug!("FastRPC ioctl invoke_fd {:?}", info);

                let scalar = Scalar(info.inv.sc);

                // One fd per argument entry (buffers + handles).
                let fds = current_task
                    .read_objects_to_vec::<i32>(info.fds.into(), scalar.len() as usize)?;

                // Collect the vmos for our fds, as well as a mapping to use locally to check
                // if an entry is mapped or not.
                let mut fd_vmos = vec![];
                for fd in fds {
                    // A non-positive fd signifies a non-mapped entry.
                    if fd > 0 {
                        // Only dma-buf files allocated by this module are accepted.
                        let file = current_task.files.get(FdNumber::from_raw(fd))?;
                        let dma_buf =
                            file.downcast_file::<DmaBufFile>().ok_or_else(|| errno!(EBADF))?;

                        // NOTE(review): fidl::Rights::SAME_RIGHTS used for a zx
                        // handle duplication — presumably a re-export of the zx
                        // rights constant; confirm.
                        let fd_vmo = dma_buf
                            .memory
                            .as_vmo()
                            .ok_or_else(|| errno!(EBADF))?
                            .duplicate_handle(fidl::Rights::SAME_RIGHTS)
                            .map_err(|e| {
                                zx_status_to_errno("parse_invoke_request duplicate_handle", e)
                            })?;

                        fd_vmos.push(Some(fd_vmo));
                    } else {
                        fd_vmos.push(None);
                    }
                }

                Ok(ParsedInvoke { invoke: info.inv, scalar, fd_vmos: Some(fd_vmos) })
            }
            linux_uapi::FASTRPC_IOCTL_INVOKE => {
                // Plain invoke: no fds, everything travels through the payload buffer.
                let info =
                    current_task.read_multi_arch_object(IoctlInvokePtr::new(current_task, arg))?;
                let scalar = Scalar(info.sc);
                Ok(ParsedInvoke { invoke: info, scalar, fd_vmos: None })
            }
            _ => {
                error!(ENOSYS)
            }
        }
    }
503
504    fn merge_buffers(
505        fd_vmos: &Option<Vec<Option<zx::Vmo>>>,
506        remote_bufs: &[linux_uapi::remote_buf],
507    ) -> Result<Vec<BufferWithMergeInfo>, Errno> {
508        // Get the indices for the buffers since we will be shuffling them around.
509        let mut indexed_buffers = remote_bufs
510            .iter()
511            .enumerate()
512            .map(|(index, buf_ref)| (index, buf_ref))
513            .collect::<Vec<_>>();
514
515        // Sort them by start address, if equal start address we sort by reverse of end address.
516        indexed_buffers.sort_by(|(_, b1), (_, b2)| {
517            let start_comparison = b1.pv.cmp(&b2.pv);
518            let end_reverse_comparison = (b2.pv.addr + b2.len).cmp(&(b1.pv.addr + b1.len));
519            match start_comparison {
520                std::cmp::Ordering::Equal => end_reverse_comparison,
521                std::cmp::Ordering::Greater | std::cmp::Ordering::Less => start_comparison,
522            }
523        });
524
525        let mut results = Vec::new();
526
527        // This is used to track the current merge region's endpoint. We don't need to track
528        // a start as we have already sorted them using the start address.
529        let mut current_merge_end: u64 = 0;
530
531        for (original_idx, buffer) in indexed_buffers.into_iter() {
532            let start = buffer.pv.addr;
533            let end = buffer.pv.addr.checked_add(buffer.len).ok_or_else(|| errno!(EOVERFLOW))?;
534
535            // The merge_contribution signifies the unique memory that needs to be used to represent
536            // this buffer in memory.
537            let merge_contribution;
538
539            // The merge offset is used to get the actual start of a buffer given a merge point,
540            // this is a negative offset on the current_merge_end.
541            let merge_offset;
542
543            if Self::is_buffer_mapped(fd_vmos, original_idx) {
544                // Ignore buffers that are mapped in our overlap calculations.
545                merge_contribution = 0;
546                merge_offset = 0;
547            } else if start < current_merge_end && end <= current_merge_end {
548                // Buffer lives entirely in the current merged region.
549                merge_contribution = 0;
550                merge_offset = current_merge_end - start;
551            } else if start < current_merge_end {
552                // Buffer lives partially in the current merged region.
553                merge_contribution = end - current_merge_end;
554                merge_offset = current_merge_end - start;
555
556                // Extend the merge region.
557                current_merge_end = end;
558            } else {
559                // Buffer does not live anywhere in the current merged region.
560                merge_contribution = end - start;
561                merge_offset = 0;
562
563                // Start a new merged region.
564                current_merge_end = end;
565            }
566
567            results.push(BufferWithMergeInfo {
568                start,
569                end,
570                buffer_index: original_idx,
571                merge_contribution,
572                merge_offset,
573            });
574        }
575
576        Ok(results)
577    }
578
579    fn get_payload_size(
580        fd_vmos: &Option<Vec<Option<zx::Vmo>>>,
581        merged_buffers: &Vec<BufferWithMergeInfo>,
582    ) -> Result<u64, Errno> {
583        let mut size: u64 = 0;
584        for i in 0..merged_buffers.len() {
585            let buffer_index = merged_buffers[i].buffer_index;
586
587            // Include in payload if not mapped.
588            if !Self::is_buffer_mapped(fd_vmos, buffer_index) {
589                if merged_buffers[i].merge_offset == 0 {
590                    // Align each new merged region.
591                    size = fastrpc_align(size)?;
592                }
593
594                size = size
595                    .checked_add(merged_buffers[i].merge_contribution)
596                    .ok_or_else(|| errno!(EOVERFLOW))?;
597            }
598        }
599
600        Ok(size)
601    }
602
603    fn is_buffer_mapped(fd_vmos: &Option<Vec<Option<zx::Vmo>>>, idx: usize) -> bool {
604        match fd_vmos {
605            None => false,
606            Some(vmos) => vmos[idx].is_some(),
607        }
608    }
609
    /// Resolves a mapped (fd-backed) buffer to `(offset_in_vmo, vmo)`.
    ///
    /// The task mapping covering `buf.pv` must be backed by the same VMO
    /// (matching koid) as the one duplicated from the caller's fd; otherwise
    /// ENOSYS is returned.
    fn get_mapped_memory_and_offset(
        current_task: &CurrentTask,
        buf: &linux_uapi::remote_buf,
        fd_vmos: &mut Option<Vec<Option<zx::Vmo>>>,
        idx: usize,
    ) -> Result<(u64, zx::Vmo), Errno> {
        // The mapping must be readable and writable at the buffer's address.
        let (mm_vmo, mm_offset) = current_task
            .mm()?
            .get_mapping_memory(buf.pv.into(), ProtectionFlags::READ | ProtectionFlags::WRITE)?;

        // Take ownership of the duplicated VMO for this index.
        // NOTE(review): the VMO is take()n out of fd_vmos even when the koid
        // check below fails, so it is dropped on the ENOSYS path — confirm
        // this is intended.
        if let Some(fd_vmo) =
            fd_vmos.as_deref_mut().and_then(|v| v.get_mut(idx)).and_then(|o| o.take())
        {
            if mm_vmo.get_koid()
                == fd_vmo
                    .basic_info()
                    .map_err(|e| zx_status_to_errno("get_mapped_memory_and_offset basic_info", e))?
                    .koid
            {
                log_debug!(
                    "FastRPC ioctl invoke found allocated vmo for user address. koid: {:?}. User pointer: {:#x} offset in vmo: {}",
                    mm_vmo.get_koid(),
                    buf.pv.addr,
                    mm_offset
                );
                return Ok((mm_offset, fd_vmo));
            }
        }

        error!(ENOSYS)
    }
641
    /// Builds the argument lists for a driver invoke.
    ///
    /// Pops a shared payload buffer from the pool when any unmapped data
    /// exists, copies input buffer contents into it at their merged offsets,
    /// and records where each output argument lives so results can be copied
    /// back afterwards (see `process_out_bufs`).
    fn get_payload_info(
        current_task: &CurrentTask,
        locked: &mut Locked<Unlocked>,
        inner_state: &OrderedMutex<FastRPCFileState, FastrpcInnerState>,
        merged_buffers: &Vec<BufferWithMergeInfo>,
        remote_bufs: &Vec<linux_uapi::remote_buf>,
        fd_vmos: &mut Option<Vec<Option<zx::Vmo>>>,
        inbufs: u32,
    ) -> Result<PayloadInformation, Errno> {
        let payload_size = Self::get_payload_size(fd_vmos, &merged_buffers)?;
        let payload_buffer = if payload_size == 0 {
            None
        } else {
            // ENOBUFS when the pool is exhausted; buffers are returned after each invoke.
            let payload_buffer =
                inner_state.lock(locked).payload_vmos.pop_front().ok_or_else(|| errno!(ENOBUFS))?;
            log_debug!("selected payload buffer {}", payload_buffer.id);
            Some(payload_buffer)
        };

        // Construct these with the usize buffer_index so we can sort them after.
        //
        // Output is specified twice, once for the fidl invocation, the other
        // to be used after the invocation is done since we want to copy data back
        // to the user.
        let mut input_args: Vec<(usize, frpc::ArgumentEntry)> = vec![];
        let mut output_args: Vec<(usize, frpc::ArgumentEntry)> = vec![];
        let mut output_info: Vec<(usize, OutputArgumentInfo)> = vec![];
        // Running end of the payload layout; mirrors the accounting in
        // get_payload_size so offsets stay within the computed payload size.
        let mut curr_merge_point = 0;

        for merged_buffer in merged_buffers {
            let buf =
                remote_bufs.get(merged_buffer.buffer_index).expect("to have index in remote bufs");
            let is_mapped = Self::is_buffer_mapped(fd_vmos, merged_buffer.buffer_index);

            let (entry, offset) = if is_mapped {
                // Mapped dma-buf: hand the VMO itself to the driver; offset is
                // the buffer's offset within that VMO.
                let (offset, vmo) = Self::get_mapped_memory_and_offset(
                    current_task,
                    buf,
                    fd_vmos,
                    merged_buffer.buffer_index,
                )?;
                (
                    frpc::ArgumentEntry::VmoArgument(frpc::VmoArgument {
                        vmo,
                        offset,
                        length: buf.len,
                    }),
                    offset,
                )
            } else {
                if merged_buffer.merge_offset == 0 {
                    curr_merge_point = fastrpc_align(curr_merge_point)?;
                }

                // Back up from the merge point into the already-accounted
                // region when this buffer overlaps a previous one.
                let offset = curr_merge_point - merged_buffer.merge_offset;
                curr_merge_point = curr_merge_point
                    .checked_add(merged_buffer.merge_contribution)
                    .ok_or_else(|| errno!(EOVERFLOW))?;
                (frpc::ArgumentEntry::Argument(frpc::Argument { offset, length: buf.len }), offset)
            };

            if merged_buffer.buffer_index < inbufs as usize {
                // Write data and flush for non-empty, non-mapped input buffers.
                if !is_mapped && buf.len > 0 {
                    let buf_data = current_task.read_buffer(&UserBuffer {
                        address: buf.pv.into(),
                        length: buf.len as usize,
                    })?;

                    // A payload buffer must exist: this arg is unmapped, so
                    // payload_size was non-zero.
                    let vmo = &payload_buffer.as_ref().expect("payload buffer").vmo;
                    vmo.write(buf_data.as_slice(), offset)
                        .map_err(|e| zx_status_to_errno("get_payload_info write", e))?;
                }

                input_args.push((merged_buffer.buffer_index, entry));
            } else {
                output_args.push((merged_buffer.buffer_index, entry));
                output_info.push((
                    merged_buffer.buffer_index,
                    OutputArgumentInfo { mapped: is_mapped, offset, length: buf.len },
                ));
            }
        }

        // Restore the caller's original argument ordering; the loop above
        // visited buffers in sorted-address order.
        input_args.sort_by_key(|e| e.0);
        output_args.sort_by_key(|e| e.0);
        output_info.sort_by_key(|e| e.0);

        let input_args = input_args.into_iter().map(|e| e.1).collect();
        let output_args = output_args.into_iter().map(|e| e.1).collect();
        let output_info = output_info.into_iter().map(|e| e.1).collect();

        Ok(PayloadInformation { payload_buffer, input_args, output_args, output_info })
    }
736
    /// Copies output argument data from the payload VMO back into the caller's
    /// buffers after a successful invoke. Mapped and zero-length outputs are
    /// skipped (mapped outputs need no copy-back through the payload VMO).
    fn process_out_bufs(
        &self,
        current_task: &CurrentTask,
        remote_bufs: &Vec<linux_uapi::remote_buf>,
        payload_vmo: &zx::Vmo,
        output_infos: &Vec<OutputArgumentInfo>,
        inbufs: u32,
    ) -> Result<(), Errno> {
        // Size a single scratch buffer for the largest unmapped output; if
        // there are none, there is nothing to copy back.
        let max_len = output_infos.iter().filter(|i| !i.mapped).map(|i| i.length).max();
        let Some(max_len) = max_len else {
            return Ok(());
        };

        let mut read_vec = vec![0; max_len as usize];

        for (output_index, output_info) in output_infos.iter().enumerate() {
            if output_info.mapped {
                continue;
            }
            if output_info.length == 0 {
                continue;
            }

            // Outputs follow the inputs in the caller's remote_buf array.
            let buf = &remote_bufs[output_index + inbufs as usize];

            // Invariant: output_info was built from this same buffer in
            // get_payload_info, so the lengths must agree.
            assert_eq!(buf.len, output_info.length);

            payload_vmo
                .read(&mut read_vec[0..output_info.length as usize], output_info.offset)
                .map_err(|e| zx_status_to_errno("process_out_bufs read", e))?;

            let _ = current_task
                .write_memory(buf.pv.into(), &read_vec[0..output_info.length as usize])?;
        }
        Ok(())
    }
773}
774
775impl FileOps for FastRPCFile {
776    fileops_impl_noop_sync!();
777    fileops_impl_seekless!();
778    fileops_impl_dataless!();
779
780    fn close(
781        self: Box<Self>,
782        locked: &mut Locked<FileOpsCore>,
783        _file: &FileObjectState,
784        _current_task: &CurrentTask,
785    ) {
786        let inner = self.inner_state.lock(locked);
787        if let Some(ref session) = inner.session {
788            session.close().expect("session close message send");
789            let evnt = session.wait_for_event(zx::MonotonicInstant::INFINITE);
790            match evnt {
791                Ok(evnt) => {
792                    log_error!("Received unexpected session event after close request: {:?}", evnt);
793                }
794                Err(e) => {
795                    if !e.is_closed() {
796                        log_error!("Received unexpected error after close request: {:?}", e);
797                    }
798                }
799            }
800        }
801    }
802
    /// Dispatches FastRPC ioctls for this open of the device node.
    ///
    /// Only the thread group that opened the file may issue ioctls; callers
    /// from any other process get `EPERM`.
    fn ioctl(
        &self,
        locked: &mut Locked<Unlocked>,
        file: &FileObject,
        current_task: &CurrentTask,
        request: u32,
        arg: SyscallArg,
    ) -> Result<SyscallResult, Errno> {
        // Sessions and their payload buffers are per-process state, so reject
        // ioctls from any thread group other than the opener.
        let pid = current_task.thread_group_key.clone();
        if pid != self.pid_open {
            return error!(EPERM);
        }

        match canonicalize_ioctl_request(current_task, request) {
            // Remote invocation, optionally carrying fd-backed arguments.
            linux_uapi::FASTRPC_IOCTL_INVOKE | linux_uapi::FASTRPC_IOCTL_INVOKE_FD => {
                self.invoke(current_task, locked, request, arg)
            }
            linux_uapi::FASTRPC_IOCTL_GETINFO => {
                // The argument is an inout u32: the caller passes the channel
                // id it wants and receives the smmu-enabled flag back.
                let user_info = UserRef::<u32>::from(arg);
                let channel_id = current_task.read_object(user_info)?;
                let device_channel_id = self
                    .device
                    .get_channel_id(zx::MonotonicInstant::INFINITE)
                    .map_err(|e| fidl_error_to_errno("get_channel_id call", e))?
                    .map_err(|e| zx_i32_to_errno("get_channel_id", e))?;

                // This device instance only serves the channel the driver
                // reports; any other requested channel is refused.
                if device_channel_id != channel_id {
                    return error!(EPERM);
                }

                // getinfo may only run before a session has been created.
                let mut inner = self.inner_state.lock(locked);
                if inner.session.is_some() {
                    return error!(EEXIST);
                }

                inner.pid = Some(pid);
                inner.cid = Some(channel_id as i32);

                log_debug!("FastRPC ioctl getinfo for channel_id {}", channel_id);

                // The reply value indicates to the user whether the smmu
                // is enabled for this session. On Fuchsia currently we enable the smmu in a
                // passthrough mode and hardcode a stream id. Eventually when we fully enable the
                // smmu we will need to allocate and use specific context banks for sessions so
                // this value will need to come from the driver.
                current_task.write_object(user_info, &(1u32))?;
                Ok(SUCCESS)
            }
            linux_uapi::FASTRPC_IOCTL_GET_DSP_INFO => {
                // UserRef note:
                // fastrpc_ioctl_capability is checked for check_arch_independent_layout.
                let user_ref = UserRef::<linux_uapi::fastrpc_ioctl_capability>::new(arg.into());
                let mut info = current_task.read_object(user_ref)?;
                log_debug!(
                    "FastRPC ioctl get dsp info domain {} attribute {}",
                    info.domain,
                    info.attribute_ID
                );
                // Fill in the requested capability value and write the whole
                // struct back to the caller.
                info.capability = self.get_capabilities(info.domain, info.attribute_ID as usize)?;
                current_task.write_object(user_ref, &info)?;
                Ok(SUCCESS)
            }
            linux_uapi::FASTRPC_IOCTL_INVOKE2 => {
                let info =
                    current_task.read_multi_arch_object(IoctlInvoke2Ptr::new(current_task, arg))?;
                // Request numbers past INVOKE2_MAX are unknown; ENOTTY tells
                // the caller the request itself is unsupported.
                if info.req > INVOKE2_MAX {
                    log_debug!("FastRPC ioctl invoke2 out of bounds req number {}", info.req);
                    return error!(ENOTTY);
                }

                // In-range invoke2 requests are recognized but not yet
                // implemented.
                log_debug!("FastRPC ioctl invoke2 {:?}", info);
                error!(ENOSYS)
            }
            linux_uapi::FASTRPC_IOCTL_INIT => {
                let info =
                    current_task.read_multi_arch_object(IoctlInitPtr::new(current_task, arg))?;

                // Bound the user-supplied sizes before doing any work with
                // them.
                if info.filelen >= INIT_FILELEN_MAX || info.memlen >= INIT_MEMLEN_MAX {
                    return error!(EFBIG);
                }

                // At most one session may be created per open file.
                let mut inner = self.inner_state.lock(locked);
                if inner.session.is_some() {
                    return error!(EEXIST);
                }

                match info.flags {
                    FASTRPC_INIT_ATTACH => {
                        log_debug!("FastRPC ioctl init FASTRPC_INIT_ATTACH {:?}", info);

                        let (client, server) =
                            fidl::endpoints::create_sync_proxy::<frpc::RemoteDomainMarker>();

                        self.device
                            .attach_root_domain(server, zx::MonotonicInstant::INFINITE)
                            .map_err(|e| fidl_error_to_errno("attach_root_domain call", e))?
                            .map_err(|e| retval_i32_to_errno("attach_root_domain", e))?;

                        // Pre-allocate shared payload buffers for later
                        // invocations. NOTE(review): the count of 3 looks like
                        // a tuning choice — confirm against driver docs.
                        inner.payload_vmos = client
                            .get_payload_buffer_set(3, zx::MonotonicInstant::INFINITE)
                            .map_err(|e| fidl_error_to_errno("get_payload_buffer_set call", e))?
                            .map_err(|e| zx_i32_to_errno("get_payload_buffer_set", e))?
                            .into();

                        inner.session = Some(Arc::new(client));
                        Ok(SUCCESS)
                    }
                    FASTRPC_INIT_CREATE_STATIC => {
                        log_debug!("FastRPC ioctl init FASTRPC_INIT_CREATE_STATIC {:?}", info);
                        // The file name is a NUL-terminated user string at
                        // most filelen bytes long.
                        let file_name = current_task.read_c_string_to_vec(
                            UserCString::new(current_task, info.file),
                            info.filelen as usize,
                        )?;

                        let (client, server) =
                            fidl::endpoints::create_sync_proxy::<frpc::RemoteDomainMarker>();

                        self.device
                            .create_static_domain(
                                file_name.to_str().map_err(|_| errno!(EINVAL))?,
                                info.memlen,
                                server,
                                zx::MonotonicInstant::INFINITE,
                            )
                            .map_err(|e| fidl_error_to_errno("create_static_domain call", e))?
                            .map_err(|e| retval_i32_to_errno("create_static_domain", e))?;

                        inner.payload_vmos = client
                            .get_payload_buffer_set(3, zx::MonotonicInstant::INFINITE)
                            .map_err(|e| fidl_error_to_errno("get_payload_buffer_set call", e))?
                            .map_err(|e| zx_i32_to_errno("get_payload_buffer_set", e))?
                            .into();

                        inner.session = Some(Arc::new(client));
                        Ok(SUCCESS)
                    }
                    _ => {
                        log_warn!("FastRPC ioctl init with unsupported flag {:?}", info);
                        error!(ENOSYS)
                    }
                }
            }
            // Everything else falls through to the generic handler.
            _ => default_ioctl(file, locked, current_task, request, arg),
        }
    }
948}
949
/// Registered device implementation for the FastRPC node. Cloned for
/// registration; all clones share the driver connection and capability cache
/// through the `Arc`s below.
#[derive(Clone)]
struct FastRPCDevice {
    // Synchronous FIDL connection to the SecureFastRpc driver, handed to every
    // `FastRPCFile` opened from this device.
    device: Arc<frpc::SecureFastRpcSynchronousProxy>,
    // Capability table shared across opens; the `OnceLock` means it is
    // populated at most once (presumably by the first capability query —
    // confirm in `get_capabilities`).
    cached_capabilities: Arc<OnceLock<[u32; FASTRPC_MAX_DSP_ATTRIBUTES]>>,
}
955
956impl FastRPCDevice {
957    fn new(device: Arc<frpc::SecureFastRpcSynchronousProxy>) -> Self {
958        Self { device, cached_capabilities: Arc::new(OnceLock::new()) }
959    }
960}
961
962impl DeviceOps for FastRPCDevice {
963    fn open(
964        &self,
965        _locked: &mut Locked<FileOpsCore>,
966        current_task: &CurrentTask,
967        _id: DeviceType,
968        _node: &NamespaceNode,
969        _flags: OpenFlags,
970    ) -> Result<Box<dyn FileOps>, Errno> {
971        Ok(Box::new(FastRPCFile::new(
972            current_task.thread_group_key.clone(),
973            self.device.clone(),
974            self.cached_capabilities.clone(),
975        )))
976    }
977}
978
979pub fn fastrpc_device_init(locked: &mut Locked<Unlocked>, system_task: &CurrentTask) {
980    let device = fuchsia_component::client::connect_to_protocol_sync::<frpc::SecureFastRpcMarker>()
981        .expect("Failed to connect to fuchsia.hardware.qualcomm.fastrpc.SecureFastRpc");
982
983    let device = Arc::new(device);
984
985    // This is called the "system" dma heap, but as of now the fastrpc client is its only client.
986    // Because fastrpc needs to be aware of the fds from this, we are putting the implementation
987    // in this module.
988    dma_heap_device_register(locked, system_task, "system", SystemHeap { device: device.clone() });
989
990    let device = FastRPCDevice::new(device);
991    let registry = &system_task.kernel().device_registry;
992    registry
993        .register_dyn_device(
994            locked,
995            system_task,
996            "adsprpc-smd-secure".into(),
997            registry.objects.get_or_create_class("fastrpc".into(), registry.objects.virtual_bus()),
998            device,
999        )
1000        .expect("Can register heap device");
1001}
1002
1003#[cfg(test)]
1004pub mod tests {
1005    use crate::fastrpc::{BufferWithMergeInfo, FastRPCFile, FastRPCFileState};
1006    use fidl_fuchsia_hardware_qualcomm_fastrpc::{
1007        Argument, ArgumentEntry, SharedPayloadBuffer, VmoArgument,
1008    };
1009    use linux_uapi::{remote_buf, uaddr};
1010    use starnix_core::mm::ProtectionFlags;
1011    use starnix_core::testing::{UserMemoryWriter, map_memory, spawn_kernel_and_run};
1012    use starnix_sync::OrderedMutex;
1013    use starnix_types::PAGE_SIZE;
1014    use starnix_uapi::user_address::UserAddress;
1015    use zx::HandleBased;
1016
1017    #[fuchsia::test]
1018    fn merge_buffers_test_empty_input() {
1019        let remote_bufs: Vec<remote_buf> = vec![];
1020        let fd_vmos = None;
1021        let results = FastRPCFile::merge_buffers(&fd_vmos, &remote_bufs).expect("merge to succeed");
1022        assert!(results.is_empty());
1023    }
1024
1025    #[fuchsia::test]
1026    fn merge_buffers_test_single_buffer() {
1027        let remote_bufs = vec![remote_buf { pv: uaddr { addr: 100 }, len: 50 }];
1028        let fd_vmos = None;
1029        let results = FastRPCFile::merge_buffers(&fd_vmos, &remote_bufs).expect("merge to succeed");
1030        assert_eq!(
1031            results,
1032            vec![BufferWithMergeInfo {
1033                start: 100,
1034                end: 150,
1035                buffer_index: 0,
1036                merge_contribution: 50,
1037                merge_offset: 0,
1038            }]
1039        );
1040    }
1041
1042    #[fuchsia::test]
1043    fn merge_buffers_test_disjoint_buffers_sorted_input() {
1044        let remote_bufs = vec![
1045            remote_buf { pv: uaddr { addr: 100 }, len: 50 },
1046            remote_buf { pv: uaddr { addr: 200 }, len: 50 },
1047        ];
1048        let fd_vmos = None;
1049        let results = FastRPCFile::merge_buffers(&fd_vmos, &remote_bufs).expect("merge to succeed");
1050        assert_eq!(
1051            results,
1052            vec![
1053                BufferWithMergeInfo {
1054                    start: 100,
1055                    end: 150,
1056                    buffer_index: 0,
1057                    merge_contribution: 50,
1058                    merge_offset: 0
1059                },
1060                BufferWithMergeInfo {
1061                    start: 200,
1062                    end: 250,
1063                    buffer_index: 1,
1064                    merge_contribution: 50,
1065                    merge_offset: 0
1066                },
1067            ]
1068        );
1069    }
1070
1071    #[fuchsia::test]
1072    fn merge_buffers_test_disjoint_buffers_unsorted_input() {
1073        let remote_bufs = vec![
1074            remote_buf { pv: uaddr { addr: 200 }, len: 50 }, // index 0
1075            remote_buf { pv: uaddr { addr: 100 }, len: 50 }, // index 1
1076        ];
1077        let fd_vmos = None;
1078        let results = FastRPCFile::merge_buffers(&fd_vmos, &remote_bufs).expect("merge to succeed");
1079        assert_eq!(
1080            results,
1081            vec![
1082                BufferWithMergeInfo {
1083                    start: 100,
1084                    end: 150,
1085                    buffer_index: 1,
1086                    merge_contribution: 50,
1087                    merge_offset: 0
1088                },
1089                BufferWithMergeInfo {
1090                    start: 200,
1091                    end: 250,
1092                    buffer_index: 0,
1093                    merge_contribution: 50,
1094                    merge_offset: 0
1095                },
1096            ]
1097        );
1098    }
1099
1100    #[fuchsia::test]
1101    fn merge_buffers_test_touching_buffers() {
1102        let remote_bufs = vec![
1103            remote_buf { pv: uaddr { addr: 100 }, len: 50 },
1104            remote_buf { pv: uaddr { addr: 150 }, len: 50 },
1105        ];
1106        let fd_vmos = None;
1107        let results = FastRPCFile::merge_buffers(&fd_vmos, &remote_bufs).expect("merge to succeed");
1108        assert_eq!(
1109            results,
1110            vec![
1111                BufferWithMergeInfo {
1112                    start: 100,
1113                    end: 150,
1114                    buffer_index: 0,
1115                    merge_contribution: 50,
1116                    merge_offset: 0
1117                },
1118                BufferWithMergeInfo {
1119                    start: 150,
1120                    end: 200,
1121                    buffer_index: 1,
1122                    merge_contribution: 50,
1123                    merge_offset: 0
1124                },
1125            ]
1126        );
1127    }
1128
1129    #[fuchsia::test]
1130    fn merge_buffers_test_touching_buffers_one_mapped() {
1131        let remote_bufs = vec![
1132            remote_buf { pv: uaddr { addr: 100 }, len: 50 },
1133            remote_buf { pv: uaddr { addr: 150 }, len: 50 },
1134        ];
1135        let fd_vmos = Some(vec![None, Some(zx::Vmo::create(1).expect("vmo"))]);
1136        let results = FastRPCFile::merge_buffers(&fd_vmos, &remote_bufs).expect("merge to succeed");
1137        assert_eq!(
1138            results,
1139            vec![
1140                BufferWithMergeInfo {
1141                    start: 100,
1142                    end: 150,
1143                    buffer_index: 0,
1144                    merge_contribution: 50,
1145                    merge_offset: 0
1146                },
1147                BufferWithMergeInfo {
1148                    start: 150,
1149                    end: 200,
1150                    buffer_index: 1,
1151                    merge_contribution: 00,
1152                    merge_offset: 0
1153                },
1154            ]
1155        );
1156    }
1157
1158    #[fuchsia::test]
1159    fn merge_buffers_test_partial_overlap() {
1160        let remote_bufs = vec![
1161            remote_buf { pv: uaddr { addr: 100 }, len: 100 },
1162            remote_buf { pv: uaddr { addr: 150 }, len: 100 },
1163        ];
1164        let fd_vmos = None;
1165        let results = FastRPCFile::merge_buffers(&fd_vmos, &remote_bufs).expect("merge to succeed");
1166        assert_eq!(
1167            results,
1168            vec![
1169                BufferWithMergeInfo {
1170                    start: 100,
1171                    end: 200,
1172                    buffer_index: 0,
1173                    merge_contribution: 100,
1174                    merge_offset: 0
1175                },
1176                BufferWithMergeInfo {
1177                    start: 150,
1178                    end: 250,
1179                    buffer_index: 1,
1180                    merge_contribution: 50,
1181                    merge_offset: 50
1182                },
1183            ]
1184        );
1185    }
1186
1187    #[fuchsia::test]
1188    fn merge_buffers_test_full_containment() {
1189        let remote_bufs = vec![
1190            remote_buf { pv: uaddr { addr: 100 }, len: 100 },
1191            remote_buf { pv: uaddr { addr: 120 }, len: 50 },
1192        ];
1193        let fd_vmos = None;
1194        let results = FastRPCFile::merge_buffers(&fd_vmos, &remote_bufs).expect("merge to succeed");
1195        assert_eq!(
1196            results,
1197            vec![
1198                BufferWithMergeInfo {
1199                    start: 100,
1200                    end: 200,
1201                    buffer_index: 0,
1202                    merge_contribution: 100,
1203                    merge_offset: 0
1204                },
1205                BufferWithMergeInfo {
1206                    start: 120,
1207                    end: 170,
1208                    buffer_index: 1,
1209                    merge_contribution: 0,
1210                    merge_offset: 80
1211                },
1212            ]
1213        );
1214    }
1215
1216    #[fuchsia::test]
1217    fn merge_buffers_test_same_start_address() {
1218        let remote_bufs = vec![
1219            remote_buf { pv: uaddr { addr: 100 }, len: 50 },
1220            remote_buf { pv: uaddr { addr: 100 }, len: 100 },
1221        ];
1222        let fd_vmos = None;
1223        let results = FastRPCFile::merge_buffers(&fd_vmos, &remote_bufs).expect("merge to succeed");
1224        assert_eq!(
1225            results,
1226            vec![
1227                BufferWithMergeInfo {
1228                    start: 100,
1229                    end: 200,
1230                    buffer_index: 1,
1231                    merge_contribution: 100,
1232                    merge_offset: 0
1233                },
1234                BufferWithMergeInfo {
1235                    start: 100,
1236                    end: 150,
1237                    buffer_index: 0,
1238                    merge_contribution: 0,
1239                    merge_offset: 100
1240                },
1241            ]
1242        );
1243    }
1244
1245    #[fuchsia::test]
1246    fn merge_buffers_test_zero_length_buffers() {
1247        let remote_bufs = vec![
1248            remote_buf { pv: uaddr { addr: 100 }, len: 50 },
1249            remote_buf { pv: uaddr { addr: 120 }, len: 0 },
1250            remote_buf { pv: uaddr { addr: 200 }, len: 0 },
1251        ];
1252        let fd_vmos = None;
1253        let results = FastRPCFile::merge_buffers(&fd_vmos, &remote_bufs).expect("merge to succeed");
1254        assert_eq!(
1255            results,
1256            vec![
1257                BufferWithMergeInfo {
1258                    start: 100,
1259                    end: 150,
1260                    buffer_index: 0,
1261                    merge_contribution: 50,
1262                    merge_offset: 0
1263                },
1264                BufferWithMergeInfo {
1265                    start: 120,
1266                    end: 120,
1267                    buffer_index: 1,
1268                    merge_contribution: 0,
1269                    merge_offset: 30
1270                },
1271                BufferWithMergeInfo {
1272                    start: 200,
1273                    end: 200,
1274                    buffer_index: 2,
1275                    merge_contribution: 0,
1276                    merge_offset: 0
1277                },
1278            ]
1279        );
1280    }
1281
1282    #[fuchsia::test]
1283    fn merge_buffers_test_complex() {
1284        let remote_bufs = vec![
1285            remote_buf { pv: uaddr { addr: 500 }, len: 100 }, // 500-600, index 0
1286            remote_buf { pv: uaddr { addr: 100 }, len: 100 }, // 100-200, index 1
1287            remote_buf { pv: uaddr { addr: 150 }, len: 100 }, // 150-250, index 2
1288            remote_buf { pv: uaddr { addr: 400 }, len: 50 },  // 400-450, index 3
1289            remote_buf { pv: uaddr { addr: 180 }, len: 20 },  // 180-200, index 4 (contained)
1290        ];
1291        let fd_vmos = None;
1292        let results = FastRPCFile::merge_buffers(&fd_vmos, &remote_bufs).expect("merge to succeed");
1293        let expected = vec![
1294            // First merge region (100 -> 200 -> 250)
1295            BufferWithMergeInfo {
1296                start: 100,
1297                end: 200,
1298                buffer_index: 1,
1299                merge_contribution: 100,
1300                merge_offset: 0,
1301            },
1302            BufferWithMergeInfo {
1303                start: 150,
1304                end: 250,
1305                buffer_index: 2,
1306                merge_contribution: 50,
1307                merge_offset: 50,
1308            },
1309            BufferWithMergeInfo {
1310                start: 180,
1311                end: 200,
1312                buffer_index: 4,
1313                merge_contribution: 0,
1314                merge_offset: 70,
1315            },
1316            // Second merge region (400 -> 450)
1317            BufferWithMergeInfo {
1318                start: 400,
1319                end: 450,
1320                buffer_index: 3,
1321                merge_contribution: 50,
1322                merge_offset: 0,
1323            },
1324            // Third merge region (500 -> 600)
1325            BufferWithMergeInfo {
1326                start: 500,
1327                end: 600,
1328                buffer_index: 0,
1329                merge_contribution: 100,
1330                merge_offset: 0,
1331            },
1332        ];
1333
1334        assert_eq!(results, expected);
1335    }
1336
1337    #[fuchsia::test]
1338    async fn get_payload_info_test_complex_range_values() {
1339        spawn_kernel_and_run(async |locked, current_task| {
1340            let addr = map_memory(locked, &current_task, UserAddress::from_ptr(100 as usize), 500);
1341
1342            // Use the same buffers as merge_buffers_test_complex but just offset them in the
1343            // memory we got mapped above.
1344            let remote_bufs = vec![
1345                remote_buf { pv: uaddr { addr: (addr + 500u64).expect("add").into() }, len: 100 },
1346                remote_buf { pv: uaddr { addr: (addr + 100u64).expect("add").into() }, len: 100 },
1347                remote_buf { pv: uaddr { addr: (addr + 150u64).expect("add").into() }, len: 100 },
1348                remote_buf { pv: uaddr { addr: (addr + 400u64).expect("add").into() }, len: 50 },
1349                remote_buf { pv: uaddr { addr: (addr + 180u64).expect("add").into() }, len: 20 },
1350            ];
1351
1352            // This variant of the test puts range based values into the user memory.
1353            let mut writer = UserMemoryWriter::new(&current_task, remote_bufs[0].pv.into());
1354            let data = (0..remote_bufs[0].len as u8).collect::<Vec<_>>();
1355            writer.write(&data);
1356
1357            let mut writer = UserMemoryWriter::new(&current_task, remote_bufs[1].pv.into());
1358            let data = (0..remote_bufs[1].len as u8).collect::<Vec<_>>();
1359            writer.write(&data);
1360
1361            let mut writer = UserMemoryWriter::new(&current_task, remote_bufs[2].pv.into());
1362            let data = (0..remote_bufs[2].len as u8).collect::<Vec<_>>();
1363            writer.write(&data);
1364
1365            let mut writer = UserMemoryWriter::new(&current_task, remote_bufs[3].pv.into());
1366            let data = (0..remote_bufs[3].len as u8).collect::<Vec<_>>();
1367            writer.write(&data);
1368
1369            let mut writer = UserMemoryWriter::new(&current_task, remote_bufs[4].pv.into());
1370            let data = (0..remote_bufs[4].len as u8).collect::<Vec<_>>();
1371            writer.write(&data);
1372
1373            let vmo = zx::Vmo::create(*PAGE_SIZE).expect("vmo create");
1374            let vmo_dup = vmo.duplicate_handle(fidl::Rights::SAME_RIGHTS).expect("dup");
1375
1376            let state = OrderedMutex::new(FastRPCFileState {
1377                session: None,
1378                payload_vmos: vec![SharedPayloadBuffer { id: 1, vmo: vmo }].into(),
1379                cid: None,
1380                pid: None,
1381            });
1382            let mut fd_vmos = None;
1383
1384            let merged_buffers =
1385                FastRPCFile::merge_buffers(&fd_vmos, &remote_bufs).expect("merge to succeed");
1386            let payload_info = FastRPCFile::get_payload_info(
1387                &current_task,
1388                locked,
1389                &state,
1390                &merged_buffers,
1391                &remote_bufs,
1392                &mut fd_vmos,
1393                3,
1394            )
1395            .expect("get_payload_info");
1396
1397            assert_eq!(
1398                payload_info.input_args,
1399                vec![
1400                    ArgumentEntry::Argument(Argument { offset: 384, length: 100 }),
1401                    ArgumentEntry::Argument(Argument { offset: 0, length: 100 }),
1402                    ArgumentEntry::Argument(Argument { offset: 50, length: 100 })
1403                ]
1404            );
1405
1406            assert_eq!(
1407                payload_info.output_args,
1408                vec![
1409                    ArgumentEntry::Argument(Argument { offset: 256, length: 50 }),
1410                    ArgumentEntry::Argument(Argument { offset: 80, length: 20 }),
1411                ]
1412            );
1413
1414            // Tests that the input buffers have been correctly setup in the payload.
1415            //
1416            // Since the buffer at 256 is part of the output, it will not be copied into the vmo as
1417            // part of the setup. But because 80-100 is already included as part of the input buffer
1418            // from 0-100 and 50-150 the data appears in here just as a side effect.
1419            let data = vmo_dup.read_to_vec::<u8>(0, 484).expect("read");
1420            let expected_vmo = vec![
1421                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
1422                23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
1423                44, 45, 46, 47, 48, 49, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
1424                17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
1425                10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
1426                61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
1427                82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 0, 0, 0,
1428                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1429                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1430                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1431                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1432                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1433                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1434                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1435                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1436                0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
1437                19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
1438                40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
1439                61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
1440                82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
1441            ];
1442
1443            assert_eq!(expected_vmo, data);
1444        })
1445        .await;
1446    }
1447
1448    #[fuchsia::test]
1449    async fn get_payload_info_test_complex_single_values() {
1450        spawn_kernel_and_run(async |locked, current_task| {
1451            let addr = map_memory(locked, &current_task, UserAddress::from_ptr(100 as usize), 500);
1452
1453            // Use the same buffers as merge_buffers_test_complex but just offset them in the
1454            // memory we got mapped above.
1455            let remote_bufs = vec![
1456                remote_buf { pv: uaddr { addr: (addr + 500u64).expect("add").into() }, len: 100 },
1457                remote_buf { pv: uaddr { addr: (addr + 100u64).expect("add").into() }, len: 100 },
1458                remote_buf { pv: uaddr { addr: (addr + 150u64).expect("add").into() }, len: 100 },
1459                remote_buf { pv: uaddr { addr: (addr + 400u64).expect("add").into() }, len: 50 },
1460                remote_buf { pv: uaddr { addr: (addr + 180u64).expect("add").into() }, len: 20 },
1461            ];
1462
1463            // This variant of the test puts single values based on the buffer index
1464            // into the user memory.
1465            let mut writer = UserMemoryWriter::new(&current_task, remote_bufs[0].pv.into());
1466            let data = vec![10; remote_bufs[0].len as usize];
1467            writer.write(&data);
1468
1469            let mut writer = UserMemoryWriter::new(&current_task, remote_bufs[1].pv.into());
1470            let data = vec![11; remote_bufs[1].len as usize];
1471            writer.write(&data);
1472
1473            let mut writer = UserMemoryWriter::new(&current_task, remote_bufs[2].pv.into());
1474            let data = vec![12; remote_bufs[2].len as usize];
1475            writer.write(&data);
1476
1477            let mut writer = UserMemoryWriter::new(&current_task, remote_bufs[3].pv.into());
1478            let data = vec![13; remote_bufs[3].len as usize];
1479            writer.write(&data);
1480
1481            let mut writer = UserMemoryWriter::new(&current_task, remote_bufs[4].pv.into());
1482            let data = vec![14; remote_bufs[4].len as usize];
1483            writer.write(&data);
1484
1485            let vmo = zx::Vmo::create(*PAGE_SIZE).expect("vmo create");
1486            let vmo_dup = vmo.duplicate_handle(fidl::Rights::SAME_RIGHTS).expect("dup");
1487
1488            let state = OrderedMutex::new(FastRPCFileState {
1489                session: None,
1490                payload_vmos: vec![SharedPayloadBuffer { id: 1, vmo: vmo }].into(),
1491                cid: None,
1492                pid: None,
1493            });
1494            let mut fd_vmos = None;
1495
1496            let merged_buffers =
1497                FastRPCFile::merge_buffers(&fd_vmos, &remote_bufs).expect("merge to succeed");
1498            let payload_info = FastRPCFile::get_payload_info(
1499                &current_task,
1500                locked,
1501                &state,
1502                &merged_buffers,
1503                &remote_bufs,
1504                &mut fd_vmos,
1505                3,
1506            )
1507            .expect("get_payload_info");
1508
1509            assert_eq!(
1510                payload_info.input_args,
1511                vec![
1512                    ArgumentEntry::Argument(Argument { offset: 384, length: 100 }),
1513                    ArgumentEntry::Argument(Argument { offset: 0, length: 100 }),
1514                    ArgumentEntry::Argument(Argument { offset: 50, length: 100 })
1515                ]
1516            );
1517
1518            assert_eq!(
1519                payload_info.output_args,
1520                vec![
1521                    ArgumentEntry::Argument(Argument { offset: 256, length: 50 }),
1522                    ArgumentEntry::Argument(Argument { offset: 80, length: 20 }),
1523                ]
1524            );
1525
1526            // Tests that the input buffers have been correctly setup in the payload.
1527            //
1528            // Since the buffer at 256 is part of the output, it will not be copied into the vmo as
1529            // part of the setup. But because 80-100 is already included as part of the input buffer
1530            // from 0-100 and 50-150 the data appears in here just as a side effect.
1531            let data = vmo_dup.read_to_vec::<u8>(0, 484).expect("read");
1532            let expected_vmo = vec![
1533                11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
1534                11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
1535                11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
1536                12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 14, 14, 14, 14,
1537                14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 12, 12, 12, 12, 12,
1538                12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
1539                12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
1540                12, 12, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1541                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1542                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1543                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1544                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1545                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1546                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1547                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1548                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
1549                10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
1550                10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
1551                10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
1552                10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
1553                10, 10, 10, 10, 10, 10,
1554            ];
1555
1556            assert_eq!(expected_vmo, data);
1557        })
1558        .await;
1559    }
1560
1561    #[fuchsia::test]
1562    async fn get_payload_info_test_complex_single_values_with_one_mapped() {
1563        spawn_kernel_and_run(async |locked, current_task| {
1564            let addr = map_memory(locked, &current_task, UserAddress::from_ptr(100 as usize), 400);
1565
1566            let mapped_addr = starnix_core::testing::map_memory_anywhere(locked, current_task, 100);
1567            let (mm_vmo, _mm_offset) = current_task
1568                .mm()
1569                .unwrap()
1570                .get_mapping_memory(mapped_addr, ProtectionFlags::READ | ProtectionFlags::WRITE)
1571                .expect("mem");
1572
1573            // Use the same buffers as merge_buffers_test_complex but just offset them in the
1574            // memory we got mapped above.
1575            let remote_bufs = vec![
1576                remote_buf { pv: uaddr { addr: mapped_addr.into() }, len: 100 },
1577                remote_buf { pv: uaddr { addr: (addr + 100u64).expect("add").into() }, len: 100 },
1578                remote_buf { pv: uaddr { addr: (addr + 150u64).expect("add").into() }, len: 100 },
1579                remote_buf { pv: uaddr { addr: (addr + 400u64).expect("add").into() }, len: 50 },
1580                remote_buf { pv: uaddr { addr: (addr + 180u64).expect("add").into() }, len: 20 },
1581            ];
1582
1583            // This variant of the test puts single values based on the buffer index
1584            // into the user memory.
1585            let mut writer = UserMemoryWriter::new(&current_task, mapped_addr.into());
1586            let data = vec![10; remote_bufs[0].len as usize];
1587            writer.write(&data);
1588
1589            let mut writer = UserMemoryWriter::new(&current_task, remote_bufs[1].pv.into());
1590            let data = vec![11; remote_bufs[1].len as usize];
1591            writer.write(&data);
1592
1593            let mut writer = UserMemoryWriter::new(&current_task, remote_bufs[2].pv.into());
1594            let data = vec![12; remote_bufs[2].len as usize];
1595            writer.write(&data);
1596
1597            let mut writer = UserMemoryWriter::new(&current_task, remote_bufs[3].pv.into());
1598            let data = vec![13; remote_bufs[3].len as usize];
1599            writer.write(&data);
1600
1601            let mut writer = UserMemoryWriter::new(&current_task, remote_bufs[4].pv.into());
1602            let data = vec![14; remote_bufs[4].len as usize];
1603            writer.write(&data);
1604
1605            let vmo = zx::Vmo::create(*PAGE_SIZE).expect("vmo create");
1606            let vmo_dup = vmo.duplicate_handle(fidl::Rights::SAME_RIGHTS).expect("dup");
1607
1608            let state = OrderedMutex::new(FastRPCFileState {
1609                session: None,
1610                payload_vmos: vec![SharedPayloadBuffer { id: 1, vmo: vmo }].into(),
1611                cid: None,
1612                pid: None,
1613            });
1614            let mut fd_vmos = Some(vec![
1615                Some(
1616                    mm_vmo
1617                        .as_vmo()
1618                        .unwrap()
1619                        .duplicate_handle(fidl::Rights::SAME_RIGHTS)
1620                        .expect("dup"),
1621                ),
1622                None,
1623                None,
1624                None,
1625                None,
1626            ]);
1627
1628            let merged_buffers =
1629                FastRPCFile::merge_buffers(&fd_vmos, &remote_bufs).expect("merge to succeed");
1630            let payload_info = FastRPCFile::get_payload_info(
1631                &current_task,
1632                locked,
1633                &state,
1634                &merged_buffers,
1635                &remote_bufs,
1636                &mut fd_vmos,
1637                3,
1638            )
1639            .expect("get_payload_info");
1640
1641            let ArgumentEntry::VmoArgument(VmoArgument { vmo: _vmo, offset: _offset, length }) =
1642                &payload_info.input_args[0]
1643            else {
1644                panic!("wrong type")
1645            };
1646
1647            assert_eq!(length, &100u64);
1648
1649            assert_eq!(
1650                payload_info.input_args[1..3],
1651                vec![
1652                    ArgumentEntry::Argument(Argument { offset: 0, length: 100 }),
1653                    ArgumentEntry::Argument(Argument { offset: 50, length: 100 })
1654                ]
1655            );
1656
1657            assert_eq!(
1658                payload_info.output_args,
1659                vec![
1660                    ArgumentEntry::Argument(Argument { offset: 256, length: 50 }),
1661                    ArgumentEntry::Argument(Argument { offset: 80, length: 20 }),
1662                ]
1663            );
1664
1665            // Tests that the input buffers have been correctly setup in the payload.
1666            //
1667            // The buffers at 500 is mapped so it should not appear here.
1668            //
1669            // Since the buffer at 256 is part of the output, it will not be copied into the vmo as
1670            // part of the setup. But because 80-100 is already included as part of the input buffer
1671            // from 0-100 and 50-150 the data appears in here just as a side effect.
1672            let data = vmo_dup.read_to_vec::<u8>(0, 484).expect("read");
1673            let expected_vmo = vec![
1674                11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
1675                11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
1676                11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
1677                12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 14, 14, 14, 14,
1678                14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 12, 12, 12, 12, 12,
1679                12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
1680                12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
1681                12, 12, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1682                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1683                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1684                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1685                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1686                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1687                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1688                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1689                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1690                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1691                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1692                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1693                0, 0,
1694            ];
1695
1696            assert_eq!(expected_vmo, data);
1697        })
1698        .await;
1699    }
1700}