1use crate::mm::MemoryManager;
6use crate::security;
7use crate::signals::SignalActions;
8use crate::task::{
9 CurrentTask, Kernel, PidTable, ProcessGroup, RobustListHeadPtr, SeccompFilterContainer,
10 SeccompState, Task, TaskBuilder, ThreadGroup, ThreadGroupParent, ThreadGroupWriteGuard,
11};
12use crate::vfs::{FdTable, FsContext};
13use starnix_sync::{
14 LockBefore, Locked, ProcessGroupState, RwLockWriteGuard, TaskRelease, Unlocked,
15};
16use starnix_task_command::TaskCommand;
17use starnix_types::arch::ArchWidth;
18use starnix_types::ownership::TempRef;
19use starnix_types::release_on_error;
20use starnix_uapi::auth::Credentials;
21use starnix_uapi::errors::Errno;
22use starnix_uapi::resource_limits::Resource;
23use starnix_uapi::signals::{SIGCHLD, Signal};
24use starnix_uapi::{errno, error, from_status_like_fdio, pid_t, rlimit};
25use std::ffi::CString;
26use std::sync::Arc;
27
/// The pieces required to construct a new `Task`: its (optional) Zircon
/// thread, the thread group it belongs to, and its (optional) memory manager.
pub struct TaskInfo {
    /// The underlying Zircon thread, if one already exists. Every factory in
    /// this file passes `None`; the thread is presumably attached later by the
    /// caller — TODO confirm against call sites outside this file.
    pub thread: Option<zx::Thread>,

    /// The thread group the new task belongs to.
    pub thread_group: Arc<ThreadGroup>,

    /// The memory manager for the new task, if one has been created. All
    /// factories in this file pass `None`.
    pub memory_manager: Option<Arc<MemoryManager>>,
}
39
40pub fn create_zircon_process<L>(
41 locked: &mut Locked<L>,
42 kernel: &Arc<Kernel>,
43 parent: Option<ThreadGroupWriteGuard<'_>>,
44 pid: pid_t,
45 exit_signal: Option<Signal>,
46 process_group: Arc<ProcessGroup>,
47 signal_actions: Arc<SignalActions>,
48 name: TaskCommand,
49) -> Result<TaskInfo, Errno>
50where
51 L: LockBefore<ProcessGroupState>,
52{
53 if kernel.is_shutting_down() {
55 return error!(EBUSY);
56 }
57 let (process, root_vmar) =
58 create_shared(&kernel.kthreads.starnix_process, zx::ProcessOptions::empty(), name)
59 .map_err(|status| from_status_like_fdio!(status))?;
60
61 fuchsia_runtime::job_default()
63 .set_critical(zx::JobCriticalOptions::RETCODE_NONZERO, &process)
64 .map_err(|status| from_status_like_fdio!(status))?;
65
66 let thread_group = ThreadGroup::new(
67 locked,
68 kernel.clone(),
69 process,
70 root_vmar,
71 parent,
72 pid,
73 exit_signal,
74 process_group,
75 signal_actions,
76 );
77
78 Ok(TaskInfo { thread: None, thread_group, memory_manager: None })
79}
80
/// Creates a new process via `zx_process_create_shared`, sharing part of
/// `process`'s address space with the new process.
///
/// Returns the new process handle together with the VMAR for the restricted
/// (non-shared) portion of its address space.
fn create_shared(
    process: &zx::Process,
    options: zx::ProcessOptions,
    name: TaskCommand,
) -> Result<(zx::Process, zx::Vmar), zx::Status> {
    let self_raw = process.raw_handle();
    let name_bytes = name.as_bytes();
    let mut process_out = 0;
    let mut restricted_vmar_out = 0;
    #[allow(
        clippy::undocumented_unsafe_blocks,
        reason = "Force documented unsafe blocks in Starnix"
    )]
    // SAFETY: `name_bytes` is a live buffer whose pointer/length pair comes
    // from `name.as_bytes()` and remains valid for the duration of the call,
    // and both out-pointers reference live stack locals.
    let status = unsafe {
        zx::sys::zx_process_create_shared(
            self_raw,
            options.bits(),
            name_bytes.as_ptr(),
            name_bytes.len(),
            &mut process_out,
            &mut restricted_vmar_out,
        )
    };
    // Propagate any syscall failure before touching the out-params.
    zx::ok(status)?;
    #[allow(
        clippy::undocumented_unsafe_blocks,
        reason = "Force documented unsafe blocks in Starnix"
    )]
    // SAFETY: the syscall succeeded, so `process_out` and
    // `restricted_vmar_out` contain valid handles owned by this process and
    // referenced nowhere else; taking ownership of them here is sound.
    unsafe {
        Ok((
            zx::Process::from(zx::NullableHandle::from_raw(process_out)),
            zx::Vmar::from(zx::NullableHandle::from_raw(restricted_vmar_out)),
        ))
    }
}
125
/// Creates a new process parented under `init` (pid 1).
///
/// The new task's security state is resolved from `seclabel` when provided,
/// otherwise from the kernel's `default_seclabel` feature, otherwise from the
/// empty context (with failures there mapped to a clearer `EINVAL`). The task
/// forks init's filesystem context and inherits init's resource limits.
///
/// # Errors
/// Returns `EINVAL` if no live init task exists, plus any error from security
/// resolution or task creation.
pub fn create_init_child_process<L>(
    locked: &mut Locked<L>,
    kernel: &Arc<Kernel>,
    initial_name: TaskCommand,
    mut creds: Credentials,
    seclabel: Option<&CString>,
) -> Result<TaskBuilder, Errno>
where
    L: LockBefore<TaskRelease>,
{
    // Look up init (pid 1); it must still be alive to parent the new process.
    let weak_init = kernel.pids.read().get_task(1);
    let init_task = weak_init.upgrade().ok_or_else(|| errno!(EINVAL))?;
    let init_live = init_task.live()?;

    // Security Context precedence: explicit label > container default > empty.
    let security_state = if let Some(seclabel) = seclabel {
        security::task_for_context(&init_task, seclabel.as_bytes().into())?
    } else if let Some(default_seclabel) = kernel.features.default_seclabel.as_ref() {
        security::task_for_context(&init_task, default_seclabel.as_bytes().into())?
    } else {
        security::task_for_context(&init_task, b"".into()).map_err(|_| {
            errno!(EINVAL, "Container has SELinux enabled but no Security Context specified")
        })?
    };
    creds.security_state = security_state;

    let task = create_task(
        locked,
        kernel,
        initial_name.clone(),
        // Fork (rather than share) init's filesystem context.
        init_live.fs().fork(),
        |locked, pid, process_group| {
            create_zircon_process(
                locked.cast_locked::<TaskRelease>(),
                kernel,
                None,
                pid,
                Some(SIGCHLD),
                process_group,
                SignalActions::default(),
                initial_name.clone(),
            )
        },
        creds.into(),
    )?;
    {
        // Link the new process under init. Lock ordering: the parent (init)
        // thread group is locked before the child.
        let mut init_writer = init_task.thread_group().write();
        let mut new_process_writer = task.thread_group().write();
        new_process_writer.parent =
            Some(ThreadGroupParent::new(Arc::downgrade(&init_task.thread_group())));
        init_writer.children.insert(task.tid, Arc::downgrade(task.thread_group()));
    }
    // Copy init's resource limits to the new task. Cloning first keeps the two
    // limit locks from ever being held at the same time.
    let limits = init_task.thread_group().limits.lock(locked.cast_locked::<TaskRelease>()).clone();
    *task.thread_group().limits.lock(locked.cast_locked::<TaskRelease>()) = limits;
    Ok(task)
}
202
203pub fn create_init_process(
222 locked: &mut Locked<Unlocked>,
223 kernel: &Arc<Kernel>,
224 pid: pid_t,
225 initial_name: TaskCommand,
226 fs: Arc<FsContext>,
227 rlimits: &[(Resource, u64)],
228) -> Result<TaskBuilder, Errno> {
229 let pids = kernel.pids.write();
230 create_task_with_pid(
231 locked,
232 kernel,
233 pids,
234 pid,
235 initial_name.clone(),
236 fs,
237 |locked, pid, process_group| {
238 create_zircon_process(
239 locked,
240 kernel,
241 None,
242 pid,
243 Some(SIGCHLD),
244 process_group,
245 SignalActions::default(),
246 initial_name.clone(),
247 )
248 },
249 Credentials::root(),
250 rlimits,
251 )
252}
253
254pub fn create_system_task<L>(
266 locked: &mut Locked<L>,
267 kernel: &Arc<Kernel>,
268 fs: Arc<FsContext>,
269) -> Result<CurrentTask, Errno>
270where
271 L: LockBefore<TaskRelease>,
272{
273 let builder = create_task(
274 locked,
275 kernel,
276 TaskCommand::new(b"kthreadd"),
277 fs,
278 |locked, pid, process_group| {
279 let thread_group = ThreadGroup::new(
280 locked.cast_locked::<TaskRelease>(),
281 kernel.clone(),
282 zx::Process::invalid(),
283 zx::Vmar::invalid(),
284 None,
285 pid,
286 Some(SIGCHLD),
287 process_group,
288 SignalActions::default(),
289 );
290 Ok(TaskInfo { thread: None, thread_group, memory_manager: None }.into())
291 },
292 Credentials::root(),
293 )?;
294 Ok(builder.into())
295}
296
297pub fn create_task<F, L>(
298 locked: &mut Locked<L>,
299 kernel: &Kernel,
300 initial_name: TaskCommand,
301 root_fs: Arc<FsContext>,
302 task_info_factory: F,
303 creds: Arc<Credentials>,
304) -> Result<TaskBuilder, Errno>
305where
306 F: FnOnce(&mut Locked<L>, i32, Arc<ProcessGroup>) -> Result<TaskInfo, Errno>,
307 L: LockBefore<TaskRelease>,
308{
309 let mut pids = kernel.pids.write();
310 let pid = pids.allocate_pid();
311 create_task_with_pid(
312 locked,
313 kernel,
314 pids,
315 pid,
316 initial_name,
317 root_fs,
318 task_info_factory,
319 creds,
320 &[],
321 )
322}
323
324fn create_task_with_pid<F, L>(
325 locked: &mut Locked<L>,
326 kernel: &Kernel,
327 mut pids: RwLockWriteGuard<'_, PidTable>,
328 pid: pid_t,
329 initial_name: TaskCommand,
330 root_fs: Arc<FsContext>,
331 task_info_factory: F,
332 creds: Arc<Credentials>,
333 rlimits: &[(Resource, u64)],
334) -> Result<TaskBuilder, Errno>
335where
336 F: FnOnce(&mut Locked<L>, i32, Arc<ProcessGroup>) -> Result<TaskInfo, Errno>,
337 L: LockBefore<TaskRelease>,
338{
339 debug_assert!(pids.get_task(pid).upgrade().is_none());
340
341 let process_group = ProcessGroup::new(pid, None);
342 pids.add_process_group(process_group.clone());
343
344 let TaskInfo { thread, thread_group, memory_manager } =
345 task_info_factory(locked, pid, process_group.clone())?;
346
347 process_group.insert(locked.cast_locked::<TaskRelease>(), &thread_group);
348
349 let default_timerslack = 50_000;
354 let builder = TaskBuilder {
355 task: Task::new(
356 pid,
357 initial_name,
358 thread_group,
359 thread,
360 FdTable::default(),
361 memory_manager,
362 root_fs,
363 creds,
364 Arc::clone(&kernel.default_abstract_socket_namespace),
365 Arc::clone(&kernel.default_abstract_vsock_namespace),
366 Default::default(),
367 Default::default(),
368 None,
369 Default::default(),
370 kernel.root_uts_ns.clone(),
371 false,
372 SeccompState::default(),
373 SeccompFilterContainer::default(),
374 RobustListHeadPtr::null(&ArchWidth::Arch64),
375 default_timerslack,
376 ),
377 thread_state: Default::default(),
378 };
379 release_on_error!(builder, locked, {
380 let temp_task = TempRef::from(&builder.task);
381 builder.thread_group().add(&temp_task)?;
382 for (resource, limit) in rlimits {
383 builder
384 .thread_group()
385 .limits
386 .lock(locked.cast_locked::<TaskRelease>())
387 .set(*resource, rlimit { rlim_cur: *limit, rlim_max: *limit });
388 }
389
390 pids.add_task(&temp_task);
391 Ok(())
392 });
393 Ok(builder)
394}
395
/// Creates a new kernel thread task sharing `system_task`'s thread group,
/// memory manager, filesystem context, credentials, and namespaces.
pub fn create_kernel_thread<L>(
    locked: &mut Locked<L>,
    system_task: &Task,
    initial_name: TaskCommand,
) -> Result<CurrentTask, Errno>
where
    L: LockBefore<TaskRelease>,
{
    let mut pids = system_task.kernel().pids.write();
    let pid = pids.allocate_pid();

    // Snapshot the inherited mutable state in a scope so the read lock on
    // `system_task` is dropped before the new task is constructed.
    let scheduler_state;
    let uts_ns;
    let default_timerslack_ns;
    {
        let state = system_task.read();
        scheduler_state = state.scheduler_state;
        uts_ns = state.uts_ns.clone();
        default_timerslack_ns = state.default_timerslack_ns;
    }

    // NOTE(review): unwrap assumes the system task is always live here —
    // confirm that invariant holds for all callers.
    let live_system_task = system_task.live().unwrap();
    let current_task: CurrentTask = TaskBuilder::new(Task::new(
        pid,
        initial_name,
        system_task.thread_group().clone(),
        None,
        FdTable::default(),
        live_system_task.mm.to_option_arc(),
        live_system_task.fs.to_arc(),
        system_task.clone_creds(),
        Arc::clone(&live_system_task.abstract_socket_namespace),
        Arc::clone(&live_system_task.abstract_vsock_namespace),
        Default::default(),
        Default::default(),
        None,
        scheduler_state,
        uts_ns,
        false,
        SeccompState::default(),
        SeccompFilterContainer::default(),
        RobustListHeadPtr::null(&ArchWidth::Arch64),
        default_timerslack_ns,
    ))
    .into();
    // Register with the shared thread group and the pid table; on failure the
    // task is released before the error propagates.
    release_on_error!(current_task, locked, {
        let temp_task = current_task.temp_task();
        current_task.thread_group().add(&temp_task)?;
        pids.add_task(&temp_task);
        Ok(())
    });
    Ok(current_task)
}