pub struct MemoryManager {
pub base_addr: UserAddress,
pub futex: Arc<FutexTable<PrivateFutexKey>>,
pub state: RwLock<MemoryManagerState>,
pub dumpable: OrderedMutex<DumpPolicy, MmDumpable>,
pub maximum_valid_user_address: UserAddress,
pub inflight_vmspliced_payloads: InflightVmsplicedPayloads,
pub drop_notifier: DropNotifier,
/* private fields */
}

Fields§

base_addr: UserAddress — The base address of the root_vmar.
futex: Arc<FutexTable<PrivateFutexKey>> — The futexes in this address space.
state: RwLock<MemoryManagerState> — Mutable state for the memory manager.
dumpable: OrderedMutex<DumpPolicy, MmDumpable> — Whether this address space is dumpable.
maximum_valid_user_address: UserAddress — Maximum valid user address for this vmar.
inflight_vmspliced_payloads: InflightVmsplicedPayloads — In-flight payloads enqueued to a pipe as a consequence of a vmsplice(2)
operation.
For details on why we need to keep track of in-flight vmspliced payloads,
see VmsplicePayload.
For details on why this isn’t under the RwLock protected MemoryManagerState,
See InflightVmsplicedPayloads::payloads.
drop_notifier: DropNotifier — A mechanism to be notified when this MemoryManager is destroyed.
Implementations§
Source§ impl MemoryManager
impl MemoryManager
pub fn summarize(&self, summary: &mut MappingSummary)
pub fn get_mappings_for_vmsplice( self: &Arc<MemoryManager>, buffers: &UserBuffers, ) -> Result<Vec<Arc<VmsplicePayload>>, Errno>
pub fn has_same_address_space(&self, other: &Self) -> bool
pub fn unified_read_memory<'a>( &self, current_task: &CurrentTask, addr: UserAddress, bytes: &'a mut [MaybeUninit<u8>], ) -> Result<&'a mut [u8], Errno>
pub fn syscall_read_memory<'a>( &self, addr: UserAddress, bytes: &'a mut [MaybeUninit<u8>], ) -> Result<&'a mut [u8], Errno>
pub fn unified_read_memory_partial_until_null_byte<'a>( &self, current_task: &CurrentTask, addr: UserAddress, bytes: &'a mut [MaybeUninit<u8>], ) -> Result<&'a mut [u8], Errno>
pub fn syscall_read_memory_partial_until_null_byte<'a>( &self, addr: UserAddress, bytes: &'a mut [MaybeUninit<u8>], ) -> Result<&'a mut [u8], Errno>
pub fn unified_read_memory_partial<'a>( &self, current_task: &CurrentTask, addr: UserAddress, bytes: &'a mut [MaybeUninit<u8>], ) -> Result<&'a mut [u8], Errno>
pub fn syscall_read_memory_partial<'a>( &self, addr: UserAddress, bytes: &'a mut [MaybeUninit<u8>], ) -> Result<&'a mut [u8], Errno>
pub fn unified_write_memory( &self, current_task: &CurrentTask, addr: UserAddress, bytes: &[u8], ) -> Result<usize, Errno>
Source§ pub fn force_write_memory(
&self,
addr: UserAddress,
bytes: &[u8],
) -> Result<(), Errno>
pub fn force_write_memory( &self, addr: UserAddress, bytes: &[u8], ) -> Result<(), Errno>
Write bytes to memory address addr, making a copy-on-write child of the VMO backing and
replacing the mapping if necessary.
NOTE: this bypasses userspace’s memory protection configuration and should only be called by codepaths like ptrace which bypass memory protection.
pub fn syscall_write_memory( &self, addr: UserAddress, bytes: &[u8], ) -> Result<usize, Errno>
pub fn unified_write_memory_partial( &self, current_task: &CurrentTask, addr: UserAddress, bytes: &[u8], ) -> Result<usize, Errno>
pub fn syscall_write_memory_partial( &self, addr: UserAddress, bytes: &[u8], ) -> Result<usize, Errno>
pub fn unified_zero( &self, current_task: &CurrentTask, addr: UserAddress, length: usize, ) -> Result<usize, Errno>
pub fn syscall_zero( &self, addr: UserAddress, length: usize, ) -> Result<usize, Errno>
Source§ pub fn as_remote(self: &Arc<Self>) -> RemoteMemoryManager
pub fn as_remote(self: &Arc<Self>) -> RemoteMemoryManager
Obtain a reference to this memory manager that can be used from another thread.
Source§ pub fn cache_flush(&self, range: Range<UserAddress>) -> Result<(), Errno>
pub fn cache_flush(&self, range: Range<UserAddress>) -> Result<(), Errno>
Performs a data and instruction cache flush over the given address range.
Source§ pub fn register_membarrier_private_expedited(
&self,
mtype: MembarrierType,
) -> Result<(), Errno>
pub fn register_membarrier_private_expedited( &self, mtype: MembarrierType, ) -> Result<(), Errno>
Register the address space managed by this memory manager for interest in receiving private expedited memory barriers of the given type.
Source§ pub fn membarrier_private_expedited_registered(
&self,
mtype: MembarrierType,
) -> bool
pub fn membarrier_private_expedited_registered( &self, mtype: MembarrierType, ) -> bool
Checks if the address space managed by this memory manager is registered for interest in private expedited barriers of the given kind.
Source§ impl MemoryManager
impl MemoryManager
pub fn new(root_vmar: Vmar) -> Result<Self, Status>
pub fn set_brk<L>(
self: &Arc<Self>,
locked: &mut Locked<L>,
current_task: &CurrentTask,
addr: UserAddress,
) -> Result<UserAddress, Errno>where
L: LockBefore<ThreadGroupLimits>,
pub fn register_uffd(&self, userfault: &Arc<UserFault>)
Source§ pub fn register_with_uffd<L>(
self: &Arc<Self>,
locked: &mut Locked<L>,
addr: UserAddress,
length: usize,
userfault: &Arc<UserFault>,
mode: FaultRegisterMode,
) -> Result<(), Errno>where
L: LockBefore<UserFaultInner>,
pub fn register_with_uffd<L>(
self: &Arc<Self>,
locked: &mut Locked<L>,
addr: UserAddress,
length: usize,
userfault: &Arc<UserFault>,
mode: FaultRegisterMode,
) -> Result<(), Errno>where
L: LockBefore<UserFaultInner>,
Register a given memory range with a userfault object.
Source§ pub fn unregister_range_from_uffd<L>(
&self,
locked: &mut Locked<L>,
userfault: &Arc<UserFault>,
addr: UserAddress,
length: usize,
) -> Result<(), Errno>where
L: LockBefore<UserFaultInner>,
pub fn unregister_range_from_uffd<L>(
&self,
locked: &mut Locked<L>,
userfault: &Arc<UserFault>,
addr: UserAddress,
length: usize,
) -> Result<(), Errno>where
L: LockBefore<UserFaultInner>,
Unregister a given range from any userfault objects associated with it.
Source§ pub fn unregister_uffd<L>(
&self,
locked: &mut Locked<L>,
userfault: &Arc<UserFault>,
)where
L: LockBefore<UserFaultInner>,
pub fn unregister_uffd<L>(
&self,
locked: &mut Locked<L>,
userfault: &Arc<UserFault>,
)where
L: LockBefore<UserFaultInner>,
Unregister any mappings registered with a given userfault object. Used when closing the last file descriptor associated to it.
Source§ pub fn populate_from_uffd<F, L>(
&self,
locked: &mut Locked<L>,
addr: UserAddress,
length: usize,
userfault: &Arc<UserFault>,
populate: F,
) -> Result<usize, Errno>
pub fn populate_from_uffd<F, L>( &self, locked: &mut Locked<L>, addr: UserAddress, length: usize, userfault: &Arc<UserFault>, populate: F, ) -> Result<usize, Errno>
Populate a range of pages registered with a userfaultfd according to a populate function.
This will fail if the pages were not registered with userfaultfd, or if the page at addr
was already populated. If any page other than the first one was populated, the length
is adjusted to only include the first N unpopulated pages, and this adjusted length
is then passed to populate. On success, returns the number of populated bytes.
pub fn zero_from_uffd<L>(
&self,
locked: &mut Locked<L>,
addr: UserAddress,
length: usize,
userfault: &Arc<UserFault>,
) -> Result<usize, Errno>where
L: LockBefore<UserFaultInner>,
pub fn fill_from_uffd<L>(
&self,
locked: &mut Locked<L>,
addr: UserAddress,
buf: &[u8],
length: usize,
userfault: &Arc<UserFault>,
) -> Result<usize, Errno>where
L: LockBefore<UserFaultInner>,
pub fn copy_from_uffd<L>(
&self,
locked: &mut Locked<L>,
source_addr: UserAddress,
dst_addr: UserAddress,
length: usize,
userfault: &Arc<UserFault>,
) -> Result<usize, Errno>where
L: LockBefore<UserFaultInner>,
Source§ pub fn snapshot_to<L>(
&self,
locked: &mut Locked<L>,
target: &Arc<MemoryManager>,
) -> Result<(), Errno>where
L: LockBefore<MmDumpable>,
pub fn snapshot_to<L>(
&self,
locked: &mut Locked<L>,
target: &Arc<MemoryManager>,
) -> Result<(), Errno>where
L: LockBefore<MmDumpable>,
Create a snapshot of the memory mapping from self into target. All
memory mappings are copied entry-for-entry, and the copies end up at
exactly the same addresses.
Source§ pub fn exec(
&self,
exe_node: NamespaceNode,
arch_width: ArchWidth,
) -> Result<Arc<Self>, Status>
pub fn exec( &self, exe_node: NamespaceNode, arch_width: ArchWidth, ) -> Result<Arc<Self>, Status>
Returns the replacement MemoryManager to be used by the exec()ing task.
POSIX requires that “a call to any exec function from a process with more than one thread
shall result in all threads being terminated and the new executable being loaded and
executed. No destructor functions or cleanup handlers shall be called”.
The caller is responsible for having ensured that this is the only Task in the
ThreadGroup, and thereby the zx::process, such that it is safe to tear-down the Zircon
userspace VMAR for the current address-space.
pub fn initialize_mmap_layout(&self, arch_width: ArchWidth) -> Result<(), Errno>
pub fn initialize_mmap_layout_for_test(self: &Arc<Self>, arch_width: ArchWidth)
pub fn initialize_brk_origin( self: &Arc<Self>, arch_width: ArchWidth, executable_end: UserAddress, ) -> Result<(), Errno>
pub fn get_random_base_for_executable( &self, arch_width: ArchWidth, length: usize, ) -> Result<UserAddress, Errno>
pub fn executable_node(&self) -> Option<NamespaceNode>
pub fn get_errno_for_map_err(status: Status) -> Errno
pub fn get_errno_for_vmo_err(status: Status) -> Errno
pub fn map_memory( self: &Arc<Self>, addr: DesiredAddress, memory: Arc<MemoryObject>, memory_offset: u64, length: usize, prot_flags: ProtectionFlags, max_access: Access, options: MappingOptions, name: MappingName, ) -> Result<UserAddress, Errno>
pub fn map_anonymous( self: &Arc<Self>, addr: DesiredAddress, length: usize, prot_flags: ProtectionFlags, options: MappingOptions, name: MappingName, ) -> Result<UserAddress, Errno>
Source§ pub fn map_stack(
self: &Arc<Self>,
length: usize,
prot_flags: ProtectionFlags,
) -> Result<UserAddress, Errno>
pub fn map_stack( self: &Arc<Self>, length: usize, prot_flags: ProtectionFlags, ) -> Result<UserAddress, Errno>
Map the stack into a pre-selected address region
pub fn remap( self: &Arc<Self>, current_task: &CurrentTask, addr: UserAddress, old_length: usize, new_length: usize, flags: MremapFlags, new_addr: UserAddress, ) -> Result<UserAddress, Errno>
pub fn unmap( self: &Arc<Self>, addr: UserAddress, length: usize, ) -> Result<(), Errno>
pub fn protect( &self, current_task: &CurrentTask, addr: UserAddress, length: usize, prot_flags: ProtectionFlags, ) -> Result<(), Errno>
pub fn madvise( &self, current_task: &CurrentTask, addr: UserAddress, length: usize, advice: u32, ) -> Result<(), Errno>
pub fn mlock<L>(
&self,
current_task: &CurrentTask,
locked: &mut Locked<L>,
desired_addr: UserAddress,
desired_length: usize,
on_fault: bool,
) -> Result<(), Errno>where
L: LockBefore<ThreadGroupLimits>,
pub fn munlock( &self, current_task: &CurrentTask, desired_addr: UserAddress, desired_length: usize, ) -> Result<(), Errno>
pub fn handle_page_fault( self: &Arc<Self>, locked: &mut Locked<Unlocked>, decoded: PageFaultExceptionReport, error_code: Status, ) -> ExceptionResult
pub fn set_mapping_name( &self, addr: UserAddress, length: usize, name: Option<FsString>, ) -> Result<(), Errno>
Source§ pub fn ensure_mapped(
&self,
addr: UserAddress,
length: usize,
) -> Result<(), Errno>
pub fn ensure_mapped( &self, addr: UserAddress, length: usize, ) -> Result<(), Errno>
Source§ pub fn get_mapping_memory(
&self,
addr: UserAddress,
perms: ProtectionFlags,
) -> Result<(Arc<MemoryObject>, u64), Errno>
pub fn get_mapping_memory( &self, addr: UserAddress, perms: ProtectionFlags, ) -> Result<(Arc<MemoryObject>, u64), Errno>
Returns the memory object mapped at the address and the offset into the memory object of the address. Intended for implementing futexes.
Source§ pub fn check_plausible(
&self,
addr: UserAddress,
buffer_size: usize,
) -> Result<(), Errno>
pub fn check_plausible( &self, addr: UserAddress, buffer_size: usize, ) -> Result<(), Errno>
Does a rough check that the given address is plausibly in the address space of the application. This does not mean the pointer is valid for any particular purpose or that it will remain so!
In some syscalls, Linux seems to do some initial validation of the pointer up front to tell the caller early if it’s invalid. For example, in epoll_wait() it’s returning a vector of events. If the caller passes an invalid pointer, it wants to fail without dropping any events. Failing later when actually copying the required events to userspace would mean those events will be lost. But holding a lock on the memory manager for an asynchronous wait is not desirable.
Testing shows that Linux seems to do some initial plausibility checking of the pointer to be able to report common usage errors before doing any (possibly irreversible) work. This checking is easy to get around if you try, so this function is also not required to be particularly robust. Certainly the more advanced cases of races (the memory could be unmapped after this call but before it’s used) are not handled.
The buffer_size variable is the size of the data structure that needs to fit in the given memory.
Returns the error EFAULT if invalid.
pub fn get_aio_context(&self, addr: UserAddress) -> Option<Arc<AioContext>>
pub fn destroy_aio_context( self: &Arc<Self>, addr: UserAddress, ) -> Result<Arc<AioContext>, Errno>
pub fn extend_growsdown_mapping_to_address( self: &Arc<Self>, addr: UserAddress, is_write: bool, ) -> Result<bool, Error>
pub fn get_stats(&self, current_task: &CurrentTask) -> MemoryStats
pub fn atomic_load_u32_acquire( &self, futex_addr: FutexAddress, ) -> Result<u32, Errno>
pub fn atomic_load_u32_relaxed( &self, futex_addr: FutexAddress, ) -> Result<u32, Errno>
pub fn atomic_store_u32_relaxed( &self, futex_addr: FutexAddress, value: u32, ) -> Result<(), Errno>
pub fn atomic_compare_exchange_u32_acq_rel( &self, futex_addr: FutexAddress, current: u32, new: u32, ) -> CompareExchangeResult<u32>
pub fn atomic_compare_exchange_weak_u32_acq_rel( &self, futex_addr: FutexAddress, current: u32, new: u32, ) -> CompareExchangeResult<u32>
pub fn get_restricted_vmar_info(&self) -> Option<VmarInfo>
Auto Trait Implementations§
impl !Freeze for MemoryManager
impl !RefUnwindSafe for MemoryManager
impl Send for MemoryManager
impl Sync for MemoryManager
impl Unpin for MemoryManager
impl !UnwindSafe for MemoryManager
Blanket Implementations§
Source§ impl<T> BorrowMut<T> for T where
T: ?Sized,
impl<T> BorrowMut<T> for T where
T: ?Sized,
Source§ fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source§ impl<T, D> Encode<Ambiguous1, D> for T where
D: ResourceDialect,
impl<T, D> Encode<Ambiguous1, D> for T where
D: ResourceDialect,
Source§ impl<T, D> Encode<Ambiguous2, D> for T where
D: ResourceDialect,
impl<T, D> Encode<Ambiguous2, D> for T where
D: ResourceDialect,
§ impl<T> InstanceFromServiceTransport<T> for T
impl<T> InstanceFromServiceTransport<T> for T
§ fn from_service_transport(handle: T) -> T
fn from_service_transport(handle: T) -> T
Converts T to [Self].
Source§ impl<T> IntoEither for T
impl<T> IntoEither for T
Source§ fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self>
if into_left is true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more
Source§ fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self>
if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more