MemoryManager

Struct MemoryManager 

Source
pub struct MemoryManager {
    pub base_addr: UserAddress,
    pub futex: Arc<FutexTable<PrivateFutexKey>>,
    pub state: RwLock<MemoryManagerState>,
    pub dumpable: OrderedMutex<DumpPolicy, MmDumpable>,
    pub maximum_valid_user_address: UserAddress,
    pub inflight_vmspliced_payloads: InflightVmsplicedPayloads,
    pub drop_notifier: DropNotifier,
    /* private fields */
}

Fields§

§base_addr: UserAddress

The base address of the root_vmar.

§futex: Arc<FutexTable<PrivateFutexKey>>

The futexes in this address space.

§state: RwLock<MemoryManagerState>

Mutable state for the memory manager.

§dumpable: OrderedMutex<DumpPolicy, MmDumpable>

Whether this address space is dumpable.

§maximum_valid_user_address: UserAddress

Maximum valid user address for this vmar.

§inflight_vmspliced_payloads: InflightVmsplicedPayloads

In-flight payloads enqueued to a pipe as a consequence of a vmsplice(2) operation.

For details on why we need to keep track of in-flight vmspliced payloads, see VmsplicePayload.

For details on why this isn’t under the RwLock-protected MemoryManagerState, see InflightVmsplicedPayloads::payloads.

§drop_notifier: DropNotifier

A mechanism to be notified when this MemoryManager is destroyed.

Implementations§

Source§

impl MemoryManager

Source

pub fn summarize(&self, summary: &mut MappingSummary)

Source

pub fn get_mappings_for_vmsplice( self: &Arc<MemoryManager>, buffers: &UserBuffers, ) -> Result<Vec<Arc<VmsplicePayload>>, Errno>

Source

pub fn has_same_address_space(&self, other: &Self) -> bool

Source

pub fn unified_read_memory<'a>( &self, current_task: &CurrentTask, addr: UserAddress, bytes: &'a mut [MaybeUninit<u8>], ) -> Result<&'a mut [u8], Errno>

Source

pub fn syscall_read_memory<'a>( &self, addr: UserAddress, bytes: &'a mut [MaybeUninit<u8>], ) -> Result<&'a mut [u8], Errno>

Source

pub fn unified_read_memory_partial_until_null_byte<'a>( &self, current_task: &CurrentTask, addr: UserAddress, bytes: &'a mut [MaybeUninit<u8>], ) -> Result<&'a mut [u8], Errno>

Source

pub fn syscall_read_memory_partial_until_null_byte<'a>( &self, addr: UserAddress, bytes: &'a mut [MaybeUninit<u8>], ) -> Result<&'a mut [u8], Errno>

Source

pub fn unified_read_memory_partial<'a>( &self, current_task: &CurrentTask, addr: UserAddress, bytes: &'a mut [MaybeUninit<u8>], ) -> Result<&'a mut [u8], Errno>

Source

pub fn syscall_read_memory_partial<'a>( &self, addr: UserAddress, bytes: &'a mut [MaybeUninit<u8>], ) -> Result<&'a mut [u8], Errno>

Source

pub fn unified_write_memory( &self, current_task: &CurrentTask, addr: UserAddress, bytes: &[u8], ) -> Result<usize, Errno>

Source

pub fn force_write_memory( &self, addr: UserAddress, bytes: &[u8], ) -> Result<(), Errno>

Write bytes to memory address addr, making a copy-on-write child of the backing VMO and replacing the mapping if necessary.

NOTE: this bypasses userspace’s memory protection configuration and should only be called by codepaths like ptrace which bypass memory protection.

Source

pub fn syscall_write_memory( &self, addr: UserAddress, bytes: &[u8], ) -> Result<usize, Errno>

Source

pub fn unified_write_memory_partial( &self, current_task: &CurrentTask, addr: UserAddress, bytes: &[u8], ) -> Result<usize, Errno>

Source

pub fn syscall_write_memory_partial( &self, addr: UserAddress, bytes: &[u8], ) -> Result<usize, Errno>

Source

pub fn unified_zero( &self, current_task: &CurrentTask, addr: UserAddress, length: usize, ) -> Result<usize, Errno>

Source

pub fn syscall_zero( &self, addr: UserAddress, length: usize, ) -> Result<usize, Errno>

Source

pub fn as_remote(self: &Arc<Self>) -> RemoteMemoryManager

Obtain a reference to this memory manager that can be used from another thread.

Source

pub fn cache_flush(&self, range: Range<UserAddress>) -> Result<(), Errno>

Performs a data and instruction cache flush over the given address range.

Source

pub fn register_membarrier_private_expedited( &self, mtype: MembarrierType, ) -> Result<(), Errno>

Register the address space managed by this memory manager for interest in receiving private expedited memory barriers of the given type.

Source

pub fn membarrier_private_expedited_registered( &self, mtype: MembarrierType, ) -> bool

Checks if the address space managed by this memory manager is registered for interest in private expedited barriers of the given kind.

Source§

impl MemoryManager

Source

pub fn new(root_vmar: Vmar) -> Result<Self, Status>

Source

pub fn set_brk<L>( self: &Arc<Self>, locked: &mut Locked<L>, current_task: &CurrentTask, addr: UserAddress, ) -> Result<UserAddress, Errno>

Source

pub fn register_uffd(&self, userfault: &Arc<UserFault>)

Source

pub fn register_with_uffd<L>( self: &Arc<Self>, locked: &mut Locked<L>, addr: UserAddress, length: usize, userfault: &Arc<UserFault>, mode: FaultRegisterMode, ) -> Result<(), Errno>

Register a given memory range with a userfault object.

Source

pub fn unregister_range_from_uffd<L>( &self, locked: &mut Locked<L>, userfault: &Arc<UserFault>, addr: UserAddress, length: usize, ) -> Result<(), Errno>

Unregister a given range from any userfault objects associated with it.

Source

pub fn unregister_uffd<L>( &self, locked: &mut Locked<L>, userfault: &Arc<UserFault>, )

Unregister any mappings registered with a given userfault object. Used when closing the last file descriptor associated to it.

Source

pub fn populate_from_uffd<F, L>( &self, locked: &mut Locked<L>, addr: UserAddress, length: usize, userfault: &Arc<UserFault>, populate: F, ) -> Result<usize, Errno>

Populate a range of pages registered with a userfaultfd according to a populate function. This will fail if the pages were not registered with userfaultfd, or if the page at addr was already populated. If any page other than the first one was populated, the length is adjusted to only include the first N unpopulated pages, and this adjusted length is then passed to populate. On success, returns the number of populated bytes.

Source

pub fn zero_from_uffd<L>( &self, locked: &mut Locked<L>, addr: UserAddress, length: usize, userfault: &Arc<UserFault>, ) -> Result<usize, Errno>

Source

pub fn fill_from_uffd<L>( &self, locked: &mut Locked<L>, addr: UserAddress, buf: &[u8], length: usize, userfault: &Arc<UserFault>, ) -> Result<usize, Errno>

Source

pub fn copy_from_uffd<L>( &self, locked: &mut Locked<L>, source_addr: UserAddress, dst_addr: UserAddress, length: usize, userfault: &Arc<UserFault>, ) -> Result<usize, Errno>

Source

pub fn snapshot_to<L>( &self, locked: &mut Locked<L>, target: &Arc<MemoryManager>, ) -> Result<(), Errno>

Create a snapshot of the memory mapping from self into target. All memory mappings are copied entry-for-entry, and the copies end up at exactly the same addresses.

Source

pub fn exec( &self, exe_node: NamespaceNode, arch_width: ArchWidth, ) -> Result<Arc<Self>, Status>

Returns the replacement MemoryManager to be used by the exec()ing task.

POSIX requires that “a call to any exec function from a process with more than one thread shall result in all threads being terminated and the new executable being loaded and executed. No destructor functions or cleanup handlers shall be called”. The caller is responsible for having ensured that this is the only Task in the ThreadGroup, and thereby the zx::process, such that it is safe to tear-down the Zircon userspace VMAR for the current address-space.

Source

pub fn initialize_mmap_layout(&self, arch_width: ArchWidth) -> Result<(), Errno>

Source

pub fn initialize_mmap_layout_for_test(self: &Arc<Self>, arch_width: ArchWidth)

Source

pub fn initialize_brk_origin( self: &Arc<Self>, arch_width: ArchWidth, executable_end: UserAddress, ) -> Result<(), Errno>

Source

pub fn get_random_base_for_executable( &self, arch_width: ArchWidth, length: usize, ) -> Result<UserAddress, Errno>

Source

pub fn executable_node(&self) -> Option<NamespaceNode>

Source

pub fn get_errno_for_map_err(status: Status) -> Errno

Source

pub fn get_errno_for_vmo_err(status: Status) -> Errno

Source

pub fn map_memory( self: &Arc<Self>, addr: DesiredAddress, memory: Arc<MemoryObject>, memory_offset: u64, length: usize, prot_flags: ProtectionFlags, max_access: Access, options: MappingOptions, name: MappingName, ) -> Result<UserAddress, Errno>

Source

pub fn map_anonymous( self: &Arc<Self>, addr: DesiredAddress, length: usize, prot_flags: ProtectionFlags, options: MappingOptions, name: MappingName, ) -> Result<UserAddress, Errno>

Source

pub fn map_stack( self: &Arc<Self>, length: usize, prot_flags: ProtectionFlags, ) -> Result<UserAddress, Errno>

Map the stack into a pre-selected address region

Source

pub fn remap( self: &Arc<Self>, current_task: &CurrentTask, addr: UserAddress, old_length: usize, new_length: usize, flags: MremapFlags, new_addr: UserAddress, ) -> Result<UserAddress, Errno>

Source

pub fn unmap( self: &Arc<Self>, addr: UserAddress, length: usize, ) -> Result<(), Errno>

Source

pub fn protect( &self, current_task: &CurrentTask, addr: UserAddress, length: usize, prot_flags: ProtectionFlags, ) -> Result<(), Errno>

Source

pub fn madvise( &self, current_task: &CurrentTask, addr: UserAddress, length: usize, advice: u32, ) -> Result<(), Errno>

Source

pub fn mlock<L>( &self, current_task: &CurrentTask, locked: &mut Locked<L>, desired_addr: UserAddress, desired_length: usize, on_fault: bool, ) -> Result<(), Errno>

Source

pub fn munlock( &self, current_task: &CurrentTask, desired_addr: UserAddress, desired_length: usize, ) -> Result<(), Errno>

Source

pub fn handle_page_fault( self: &Arc<Self>, locked: &mut Locked<Unlocked>, decoded: PageFaultExceptionReport, error_code: Status, ) -> ExceptionResult

Source

pub fn set_mapping_name( &self, addr: UserAddress, length: usize, name: Option<FsString>, ) -> Result<(), Errno>

Source

pub fn ensure_mapped( &self, addr: UserAddress, length: usize, ) -> Result<(), Errno>

Returns Ok if the entire range specified by addr..(addr+length) contains valid mappings.

§Errors

Returns Err(errno) where errno is:

  • EINVAL: addr is not page-aligned, or the range is too large,
  • ENOMEM: one or more pages in the range are not mapped.
Source

pub fn get_mapping_memory( &self, addr: UserAddress, perms: ProtectionFlags, ) -> Result<(Arc<MemoryObject>, u64), Errno>

Returns the memory object mapped at the address and the offset into the memory object of the address. Intended for implementing futexes.

Source

pub fn check_plausible( &self, addr: UserAddress, buffer_size: usize, ) -> Result<(), Errno>

Does a rough check that the given address is plausibly in the address space of the application. This does not mean the pointer is valid for any particular purpose or that it will remain so!

In some syscalls, Linux seems to do some initial validation of the pointer up front to tell the caller early if it’s invalid. For example, in epoll_wait() it’s returning a vector of events. If the caller passes an invalid pointer, it wants to fail without dropping any events. Failing later when actually copying the required events to userspace would mean those events will be lost. But holding a lock on the memory manager for an asynchronous wait is not desirable.

Testing shows that Linux seems to do some initial plausibility checking of the pointer to be able to report common usage errors before doing any (possibly irreversible) work. This checking is easy to get around if you try, so this function is also not required to be particularly robust. Certainly the more advanced cases of races (the memory could be unmapped after this call but before it’s used) are not handled.

The buffer_size variable is the size of the data structure that needs to fit in the given memory.

Returns the error EFAULT if invalid.

Source

pub fn get_aio_context(&self, addr: UserAddress) -> Option<Arc<AioContext>>

Source

pub fn destroy_aio_context( self: &Arc<Self>, addr: UserAddress, ) -> Result<Arc<AioContext>, Errno>

Source

pub fn extend_growsdown_mapping_to_address( self: &Arc<Self>, addr: UserAddress, is_write: bool, ) -> Result<bool, Error>

Source

pub fn get_stats(&self, current_task: &CurrentTask) -> MemoryStats

Source

pub fn atomic_load_u32_acquire( &self, futex_addr: FutexAddress, ) -> Result<u32, Errno>

Source

pub fn atomic_load_u32_relaxed( &self, futex_addr: FutexAddress, ) -> Result<u32, Errno>

Source

pub fn atomic_store_u32_relaxed( &self, futex_addr: FutexAddress, value: u32, ) -> Result<(), Errno>

Source

pub fn atomic_compare_exchange_u32_acq_rel( &self, futex_addr: FutexAddress, current: u32, new: u32, ) -> CompareExchangeResult<u32>

Source

pub fn atomic_compare_exchange_weak_u32_acq_rel( &self, futex_addr: FutexAddress, current: u32, new: u32, ) -> CompareExchangeResult<u32>

Source

pub fn get_restricted_vmar_info(&self) -> Option<VmarInfo>

Auto Trait Implementations§

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> AsAny for T
where T: Any,

Source§

fn as_any(&self) -> &(dyn Any + 'static)

Source§

fn type_name(&self) -> &'static str

Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T, D> Encode<Ambiguous1, D> for T
where D: ResourceDialect,

Source§

unsafe fn encode( self, _encoder: &mut Encoder<'_, D>, _offset: usize, _depth: Depth, ) -> Result<(), Error>

Encodes the object into the encoder’s buffers. Any handles stored in the object are swapped for Handle::INVALID. Read more
Source§

impl<T, D> Encode<Ambiguous2, D> for T
where D: ResourceDialect,

Source§

unsafe fn encode( self, _encoder: &mut Encoder<'_, D>, _offset: usize, _depth: Depth, ) -> Result<(), Error>

Encodes the object into the encoder’s buffers. Any handles stored in the object are swapped for Handle::INVALID. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

§

impl<T> InstanceFromServiceTransport<T> for T

§

fn from_service_transport(handle: T) -> T

Converts the given service transport handle of type T to [Self]
Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T, U> Into32<U> for T
where U: MultiArchFrom<T>,

Source§

fn into_32(self) -> U

Source§

impl<T, U> Into64<U> for T
where U: MultiArchFrom<T>,

Source§

fn into_64(self) -> U

Source§

impl<T> IntoAny for T
where T: 'static + Send + Sync,

Source§

fn into_any(self: Arc<T>) -> Arc<dyn Any + Sync + Send>

Cast the given object into a dyn std::any::Any.
Source§

impl<T> IntoEither for T

Source§

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
§

impl<T, U> IntoExt<U> for T
where U: FromExt<T>,

§

fn into_ext(self) -> U

Performs the conversion.
Source§

impl<T, U> IntoFidl<U> for T
where U: FromFidl<T>,

Source§

fn into_fidl(self) -> U

Source§

impl<T, U> MultiArchFrom<T> for U
where U: From<T>,

Source§

fn from_64(value: T) -> U

Source§

fn from_32(value: T) -> U

§

impl<T> Pointable for T

§

const ALIGN: usize

The alignment of pointer.
§

type Init = T

The type for initializers.
§

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a pointer with the given initializer. Read more
§

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more
§

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more
§

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more
Source§

impl<T> Same for T

Source§

type Output = T

Should always be Self
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
§

impl<T, U> TryIntoExt<U> for T
where U: TryFromExt<T>,

§

type Error = <U as TryFromExt<T>>::Error

§

fn try_into_ext(self) -> Result<U, <T as TryIntoExt<U>>::Error>

Tries to perform the conversion.
§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

§

fn vzip(self) -> V

§

impl<St> WithTag for St

§

fn tagged<T>(self, tag: T) -> Tagged<T, St>

Produce a new stream from this one which yields item tupled with a constant tag
Source§

impl<B, A> LockBefore<B> for A
where B: LockAfter<A>,

Source§

impl<B, A> LockEqualOrBefore<B> for A
where A: LockBefore<B>,

§

impl<E> RunsTransport<Mpsc> for E

§

impl<E> RunsTransport<Mpsc> for E
where E: RunsTransport<Mpsc>,