// starnix_core/execution/task_creation.rs

// Copyright 2025 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use crate::mm::MemoryManager;
use crate::security;
use crate::signals::SignalActions;
use crate::task::{
    CurrentTask, Kernel, PidTable, ProcessGroup, RobustListHeadPtr, SeccompFilterContainer,
    SeccompState, Task, TaskBuilder, ThreadGroup, ThreadGroupParent, ThreadGroupWriteGuard,
};
use crate::vfs::{FdTable, FsContext};
use starnix_sync::{
    LockBefore, Locked, ProcessGroupState, RwLockWriteGuard, TaskRelease, Unlocked,
};
use starnix_task_command::TaskCommand;
use starnix_types::arch::ArchWidth;
use starnix_types::ownership::TempRef;
use starnix_types::release_on_error;
use starnix_uapi::auth::Credentials;
use starnix_uapi::errors::Errno;
use starnix_uapi::resource_limits::Resource;
use starnix_uapi::signals::{SIGCHLD, Signal};
use starnix_uapi::{errno, error, from_status_like_fdio, pid_t, rlimit};
use std::ffi::CString;
use std::sync::Arc;

/// Result returned when creating new Zircon threads and processes for tasks.
pub struct TaskInfo {
    /// The thread that was created for the task.
    ///
    /// NOTE(review): every factory in this file sets this to `None`; presumably a
    /// dedicated Zircon thread is attached later by the execution machinery — confirm.
    pub thread: Option<zx::Thread>,

    /// The thread group that the task should be added to.
    pub thread_group: Arc<ThreadGroup>,

    /// The memory manager to use for the task.
    ///
    /// `None` in the creation paths in this file; passed through to `Task::new`.
    pub memory_manager: Option<Arc<MemoryManager>>,
}

/// Creates the Zircon process and thread group that will host a new task.
///
/// The process shares half its address space with the main Starnix kernel process
/// (via `create_shared`) and is marked job-critical, so a non-zero exit code from
/// it kills the whole kernel's job.
///
/// Returns a `TaskInfo` with no thread and no memory manager; the caller supplies
/// those when constructing the `Task` itself.
///
/// # Errors
///
/// Returns `EBUSY` if the kernel is shutting down, or a status-derived errno if
/// process creation or the job-critical marking fails.
pub fn create_zircon_process<L>(
    locked: &mut Locked<L>,
    kernel: &Arc<Kernel>,
    parent: Option<ThreadGroupWriteGuard<'_>>,
    pid: pid_t,
    exit_signal: Option<Signal>,
    process_group: Arc<ProcessGroup>,
    signal_actions: Arc<SignalActions>,
    name: TaskCommand,
) -> Result<TaskInfo, Errno>
where
    L: LockBefore<ProcessGroupState>,
{
    // Don't allow new processes to be created once the kernel has started shutting down.
    if kernel.is_shutting_down() {
        return error!(EBUSY);
    }
    let (process, root_vmar) =
        create_shared(&kernel.kthreads.starnix_process, zx::ProcessOptions::empty(), name)
            .map_err(|status| from_status_like_fdio!(status))?;

    // Make sure that if this process panics in normal mode that the whole kernel's job is killed.
    fuchsia_runtime::job_default()
        .set_critical(zx::JobCriticalOptions::RETCODE_NONZERO, &process)
        .map_err(|status| from_status_like_fdio!(status))?;

    let thread_group = ThreadGroup::new(
        locked,
        kernel.clone(),
        process,
        root_vmar,
        parent,
        pid,
        exit_signal,
        process_group,
        signal_actions,
    );

    // No Zircon thread or memory manager is created here; see `TaskInfo` docs.
    Ok(TaskInfo { thread: None, thread_group, memory_manager: None })
}

/// Creates a process that shares half its address space with this process.
///
/// The created process will also share its handle table and futex context with `self`.
///
/// Returns the created process and a handle to the created process' restricted address space.
///
/// Wraps the
/// [zx_process_create_shared](https://fuchsia.dev/fuchsia-src/reference/syscalls/process_create_shared.md)
/// syscall.
fn create_shared(
    process: &zx::Process,
    options: zx::ProcessOptions,
    name: TaskCommand,
) -> Result<(zx::Process, zx::Vmar), zx::Status> {
    let self_raw = process.raw_handle();
    let name_bytes = name.as_bytes();
    // Out-parameters the syscall fills with raw handle values on success.
    let mut process_out = 0;
    let mut restricted_vmar_out = 0;
    #[allow(
        clippy::undocumented_unsafe_blocks,
        reason = "Force documented unsafe blocks in Starnix"
    )]
    // SAFETY(review): `name_bytes` is a valid pointer/length pair that outlives the
    // call, and both out-pointers reference live stack locals for the syscall to
    // write into.
    let status = unsafe {
        zx::sys::zx_process_create_shared(
            self_raw,
            options.bits(),
            name_bytes.as_ptr(),
            name_bytes.len(),
            &mut process_out,
            &mut restricted_vmar_out,
        )
    };
    zx::ok(status)?;
    #[allow(
        clippy::undocumented_unsafe_blocks,
        reason = "Force documented unsafe blocks in Starnix"
    )]
    // SAFETY(review): `zx::ok` succeeded above, so `process_out` and
    // `restricted_vmar_out` hold handles owned by this process; `from_raw`
    // transfers that ownership into the wrappers exactly once.
    unsafe {
        Ok((
            zx::Process::from(zx::NullableHandle::from_raw(process_out)),
            zx::Vmar::from(zx::NullableHandle::from_raw(restricted_vmar_out)),
        ))
    }
}

/// Create a process that is a child of the `init` process.
///
/// The created process will be a task that is the leader of a new thread group.
///
/// Most processes are created by userspace and are descendants of the `init` process. In
/// some situations, the kernel needs to create a process itself. This function is the
/// preferred way of creating an actual userspace process because making the process a child of
/// `init` means that `init` is responsible for waiting on the process when it dies and thereby
/// cleaning up its zombie.
///
/// If you just need a kernel task, and not an entire userspace process, consider using
/// `create_system_task` instead. Even better, consider using the `kthreads` threadpool.
///
/// If `seclabel` is set, or the container specified a `default_seclabel`, then it will be
/// resolved against the `kernel`'s active security policy, and applied to the new task.
/// Otherwise the task will inherit its LSM state from the "init" task.
///
/// This function creates an underlying Zircon process to host the new task.
pub fn create_init_child_process<L>(
    locked: &mut Locked<L>,
    kernel: &Arc<Kernel>,
    initial_name: TaskCommand,
    mut creds: Credentials,
    seclabel: Option<&CString>,
) -> Result<TaskBuilder, Errno>
where
    L: LockBefore<TaskRelease>,
{
    // The parent must be `init` (pid 1); fail with EINVAL if it no longer exists.
    let weak_init = kernel.pids.read().get_task(1);
    let init_task = weak_init.upgrade().ok_or_else(|| errno!(EINVAL))?;
    let init_live = init_task.live()?;

    // Resolve the task's LSM security state: an explicit `seclabel` wins, then the
    // container's `default_seclabel`, otherwise fall back to the empty context,
    // which only resolves when SELinux is disabled.
    let security_state = if let Some(seclabel) = seclabel {
        security::task_for_context(&init_task, seclabel.as_bytes().into())?
    } else if let Some(default_seclabel) = kernel.features.default_seclabel.as_ref() {
        security::task_for_context(&init_task, default_seclabel.as_bytes().into())?
    } else {
        // If SELinux is enabled then this call will fail with `EINVAL`.
        security::task_for_context(&init_task, b"".into()).map_err(|_| {
            errno!(EINVAL, "Container has SELinux enabled but no Security Context specified")
        })?
    };
    creds.security_state = security_state;

    let task = create_task(
        locked,
        kernel,
        initial_name.clone(),
        init_live.fs().fork(),
        |locked, pid, process_group| {
            create_zircon_process(
                locked.cast_locked::<TaskRelease>(),
                kernel,
                None,
                pid,
                Some(SIGCHLD),
                process_group,
                SignalActions::default(),
                initial_name.clone(),
            )
        },
        creds.into(),
    )?;
    // Wire up the parent/child relationship between init's thread group and the
    // newly created one, holding both write guards inside a short scope.
    {
        let mut init_writer = init_task.thread_group().write();
        let mut new_process_writer = task.thread_group().write();
        new_process_writer.parent =
            Some(ThreadGroupParent::new(Arc::downgrade(&init_task.thread_group())));
        init_writer.children.insert(task.tid, Arc::downgrade(task.thread_group()));
    }
    // A child process created via fork(2) inherits its parent's
    // resource limits.  Resource limits are preserved across execve(2).
    let limits = init_task.thread_group().limits.lock(locked.cast_locked::<TaskRelease>()).clone();
    *task.thread_group().limits.lock(locked.cast_locked::<TaskRelease>()) = limits;
    Ok(task)
}

/// Creates the initial process for a kernel.
///
/// The created process will be a task that is the leader of a new thread group.
///
/// The init process is special because it's the root of the parent/child relationship between
/// tasks. If a task dies, the init process is ultimately responsible for waiting on that task
/// and removing it from the zombie list.
///
/// It's possible for the kernel to create tasks whose ultimate parent isn't init, but such
/// tasks cannot be created by userspace directly.
///
/// This function should only be called as part of booting a kernel instance. To create a
/// process after the kernel has already booted, consider `create_init_child_process`
/// or `create_system_task`.
///
/// The process created by this function should always have pid 1. We require the caller to
/// pass the `pid` as an argument to clarify that it's the callers responsibility to determine
/// the pid for the process.
pub fn create_init_process(
    locked: &mut Locked<Unlocked>,
    kernel: &Arc<Kernel>,
    pid: pid_t,
    initial_name: TaskCommand,
    fs: Arc<FsContext>,
    rlimits: &[(Resource, u64)],
) -> Result<TaskBuilder, Errno> {
    // Unlike `create_task`, the pid is caller-chosen, so take the pid-table write
    // lock here and pass the guard down for registration.
    let pids = kernel.pids.write();
    create_task_with_pid(
        locked,
        kernel,
        pids,
        pid,
        initial_name.clone(),
        fs,
        |locked, pid, process_group| {
            create_zircon_process(
                locked,
                kernel,
                None, // init has no parent thread group.
                pid,
                Some(SIGCHLD),
                process_group,
                SignalActions::default(),
                initial_name.clone(),
            )
        },
        Credentials::root(),
        rlimits,
    )
}

254/// Create a task that runs inside the kernel.
255///
256/// There is no underlying Zircon process to host the task. Instead, the work done by this task
257/// is performed by a thread in the original Starnix process, possible as part of a thread
258/// pool.
259///
260/// This function is the preferred way to create a context for doing background work inside the
261/// kernel.
262///
263/// Rather than calling this function directly, consider using `kthreads`, which provides both
264/// a system task and a threadpool on which the task can do work.
265pub fn create_system_task<L>(
266    locked: &mut Locked<L>,
267    kernel: &Arc<Kernel>,
268    fs: Arc<FsContext>,
269) -> Result<CurrentTask, Errno>
270where
271    L: LockBefore<TaskRelease>,
272{
273    let builder = create_task(
274        locked,
275        kernel,
276        TaskCommand::new(b"kthreadd"),
277        fs,
278        |locked, pid, process_group| {
279            let thread_group = ThreadGroup::new(
280                locked.cast_locked::<TaskRelease>(),
281                kernel.clone(),
282                zx::Process::invalid(),
283                zx::Vmar::invalid(),
284                None,
285                pid,
286                Some(SIGCHLD),
287                process_group,
288                SignalActions::default(),
289            );
290            Ok(TaskInfo { thread: None, thread_group, memory_manager: None }.into())
291        },
292        Credentials::root(),
293    )?;
294    Ok(builder.into())
295}
296
/// Creates a task with a freshly allocated pid.
///
/// Takes the kernel-wide pid-table write lock, allocates the pid, and delegates
/// to `create_task_with_pid`. `task_info_factory` supplies the thread group
/// (and optional thread / memory manager) hosting the task. No rlimits are
/// applied by this path (an empty slice is passed through).
pub fn create_task<F, L>(
    locked: &mut Locked<L>,
    kernel: &Kernel,
    initial_name: TaskCommand,
    root_fs: Arc<FsContext>,
    task_info_factory: F,
    creds: Arc<Credentials>,
) -> Result<TaskBuilder, Errno>
where
    F: FnOnce(&mut Locked<L>, i32, Arc<ProcessGroup>) -> Result<TaskInfo, Errno>,
    L: LockBefore<TaskRelease>,
{
    // Hold the write guard across creation so the allocated pid cannot be reused
    // before the task is registered.
    let mut pids = kernel.pids.write();
    let pid = pids.allocate_pid();
    create_task_with_pid(
        locked,
        kernel,
        pids,
        pid,
        initial_name,
        root_fs,
        task_info_factory,
        creds,
        &[],
    )
}

/// Creates a task with the given `pid` and registers it in `pids`.
///
/// `task_info_factory` produces the thread group (and optional thread and memory
/// manager) hosting the task; `rlimits` are then applied to the new thread group.
/// The caller passes in the pid-table write guard, guaranteeing the pid stays
/// reserved for the duration of creation; this function debug-asserts that `pid`
/// is not already in use.
fn create_task_with_pid<F, L>(
    locked: &mut Locked<L>,
    kernel: &Kernel,
    mut pids: RwLockWriteGuard<'_, PidTable>,
    pid: pid_t,
    initial_name: TaskCommand,
    root_fs: Arc<FsContext>,
    task_info_factory: F,
    creds: Arc<Credentials>,
    rlimits: &[(Resource, u64)],
) -> Result<TaskBuilder, Errno>
where
    F: FnOnce(&mut Locked<L>, i32, Arc<ProcessGroup>) -> Result<TaskInfo, Errno>,
    L: LockBefore<TaskRelease>,
{
    debug_assert!(pids.get_task(pid).upgrade().is_none());

    // The new task leads a fresh process group keyed by its own pid.
    let process_group = ProcessGroup::new(pid, None);
    pids.add_process_group(process_group.clone());

    let TaskInfo { thread, thread_group, memory_manager } =
        task_info_factory(locked, pid, process_group.clone())?;

    process_group.insert(locked.cast_locked::<TaskRelease>(), &thread_group);

    // > The timer slack values of init (PID 1), the ancestor of all processes, are 50,000
    // > nanoseconds (50 microseconds).  The timer slack value is inherited by a child created
    // > via fork(2), and is preserved across execve(2).
    // https://man7.org/linux/man-pages/man2/prctl.2.html
    let default_timerslack = 50_000;
    let builder = TaskBuilder {
        task: Task::new(
            pid,
            initial_name,
            thread_group,
            thread,
            FdTable::default(),
            memory_manager,
            root_fs,
            creds,
            Arc::clone(&kernel.default_abstract_socket_namespace),
            Arc::clone(&kernel.default_abstract_vsock_namespace),
            Default::default(),
            Default::default(),
            None,
            Default::default(),
            kernel.root_uts_ns.clone(),
            false,
            SeccompState::default(),
            SeccompFilterContainer::default(),
            RobustListHeadPtr::null(&ArchWidth::Arch64),
            default_timerslack,
        ),
        thread_state: Default::default(),
    };
    // If registration fails, `release_on_error!` releases the partially-constructed
    // builder instead of plain-dropping it (task objects use explicit release).
    release_on_error!(builder, locked, {
        let temp_task = TempRef::from(&builder.task);
        builder.thread_group().add(&temp_task)?;
        for (resource, limit) in rlimits {
            builder
                .thread_group()
                .limits
                .lock(locked.cast_locked::<TaskRelease>())
                .set(*resource, rlimit { rlim_cur: *limit, rlim_max: *limit });
        }

        pids.add_task(&temp_task);
        Ok(())
    });
    Ok(builder)
}

/// Create a kernel task in the same ThreadGroup as the given `system_task`.
///
/// There is no underlying Zircon thread to host the task.
///
/// The new task shares `system_task`'s thread group, memory manager, filesystem
/// context, credentials, and abstract socket/vsock namespaces, and inherits its
/// scheduler state, UTS namespace, and timer slack.
pub fn create_kernel_thread<L>(
    locked: &mut Locked<L>,
    system_task: &Task,
    initial_name: TaskCommand,
) -> Result<CurrentTask, Errno>
where
    L: LockBefore<TaskRelease>,
{
    let mut pids = system_task.kernel().pids.write();
    let pid = pids.allocate_pid();

    // Snapshot inherited state in a short-lived read-lock scope so the task
    // state lock is not held across task construction below.
    let scheduler_state;
    let uts_ns;
    let default_timerslack_ns;
    {
        let state = system_task.read();
        scheduler_state = state.scheduler_state;
        uts_ns = state.uts_ns.clone();
        default_timerslack_ns = state.default_timerslack_ns;
    }

    // NOTE(review): `unwrap` assumes the system task is always live here — it is
    // expected to outlive the kernel's background work.
    let live_system_task = system_task.live().unwrap();
    let current_task: CurrentTask = TaskBuilder::new(Task::new(
        pid,
        initial_name,
        system_task.thread_group().clone(),
        None, // No Zircon thread backs this task.
        FdTable::default(),
        live_system_task.mm.to_option_arc(),
        live_system_task.fs.to_arc(),
        system_task.clone_creds(),
        Arc::clone(&live_system_task.abstract_socket_namespace),
        Arc::clone(&live_system_task.abstract_vsock_namespace),
        Default::default(),
        Default::default(),
        None,
        scheduler_state,
        uts_ns,
        false,
        SeccompState::default(),
        SeccompFilterContainer::default(),
        RobustListHeadPtr::null(&ArchWidth::Arch64),
        default_timerslack_ns,
    ))
    .into();
    // If registration fails, `release_on_error!` releases the task rather than
    // dropping it.
    release_on_error!(current_task, locked, {
        let temp_task = current_task.temp_task();
        current_task.thread_group().add(&temp_task)?;
        pids.add_task(&temp_task);
        Ok(())
    });
    Ok(current_task)
}
