// starnix_core/signals/signal_handling.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::arch::signal_handling::{
    RED_ZONE_SIZE, SIG_STACK_SIZE, SignalStackFrame, align_stack_pointer, restore_registers,
};
use crate::mm::{MemoryAccessor, MemoryAccessorExt};
use crate::ptrace::StopState;
use crate::signals::{KernelSignal, KernelSignalInfo, SignalDetail, SignalInfo, SignalState};
use crate::task::{
    ArchExtendedPstateStorage, CurrentTask, ExitStatus, Task, TaskFlags, TaskWriteGuard,
    ThreadState, Waiter,
};
use starnix_logging::{log_info, log_trace, log_warn};
use starnix_registers::{RegisterState, RegisterStorageEnum};
use starnix_sync::{LockBefore, Locked, ThreadGroupLimits, Unlocked};
use starnix_syscalls::SyscallResult;
use starnix_types::arch::ArchWidth;
use starnix_uapi::errors::{EINTR, ERESTART_RESTARTBLOCK, Errno};
use starnix_uapi::resource_limits::Resource;
use starnix_uapi::signals::{
    SIGABRT, SIGALRM, SIGBUS, SIGCHLD, SIGCONT, SIGFPE, SIGHUP, SIGILL, SIGINT, SIGIO, SIGKILL,
    SIGPIPE, SIGPROF, SIGPWR, SIGQUIT, SIGSEGV, SIGSTKFLT, SIGSTOP, SIGSYS, SIGTERM, SIGTRAP,
    SIGTSTP, SIGTTIN, SIGTTOU, SIGURG, SIGUSR1, SIGUSR2, SIGVTALRM, SIGWINCH, SIGXCPU, SIGXFSZ,
    SigSet, sigaltstack_contains_pointer,
};
use starnix_uapi::user_address::{ArchSpecific, UserAddress};
use starnix_uapi::{
    SA_NODEFER, SA_ONSTACK, SA_RESETHAND, SA_SIGINFO, SIG_DFL, SIG_IGN, errno, error, sigaction_t,
};

/// Indicates where in the signal queue a signal should go.  Signals
/// can jump the queue when being injected by tools like ptrace.
#[derive(Debug, PartialEq, Eq)]
enum SignalPriority {
    /// Jump to the front of the queue (e.g. ptrace injection, freeze).
    First,
    /// Append to the back of the queue: normal delivery order.
    Last,
}

41// `send_signal*()` calls below may fail only for real-time signals (with EAGAIN). They are
42// expected to succeed for all other signals.
43pub fn send_signal_first<L>(
44    locked: &mut Locked<L>,
45    task: &Task,
46    task_state: TaskWriteGuard<'_>,
47    siginfo: SignalInfo,
48) where
49    L: LockBefore<ThreadGroupLimits>,
50{
51    send_signal_prio(locked, task, task_state, siginfo.into(), SignalPriority::First, true)
52        .expect("send_signal(SignalPriority::First) is not expected to fail")
53}
54
55// Sends `signal` to `task`. The signal must be a standard (i.e. not real-time) signal.
56pub fn send_standard_signal<L>(locked: &mut Locked<L>, task: &Task, siginfo: SignalInfo)
57where
58    L: LockBefore<ThreadGroupLimits>,
59{
60    debug_assert!(!siginfo.signal.is_real_time());
61    let state = task.write();
62    send_signal_prio(locked, task, state, siginfo.into(), SignalPriority::Last, false)
63        .expect("send_signal(SignalPriority::Last) is not expected to fail for standard signals.")
64}
65
66pub fn send_signal<L>(locked: &mut Locked<L>, task: &Task, siginfo: SignalInfo) -> Result<(), Errno>
67where
68    L: LockBefore<ThreadGroupLimits>,
69{
70    let state = task.write();
71    send_signal_prio(locked, task, state, siginfo.into(), SignalPriority::Last, false)
72}
73
74pub fn send_freeze_signal<L>(
75    locked: &mut Locked<L>,
76    task: &Task,
77    waiter: Waiter,
78) -> Result<(), Errno>
79where
80    L: LockBefore<ThreadGroupLimits>,
81{
82    let state = task.write();
83    send_signal_prio(
84        locked,
85        task,
86        state,
87        KernelSignalInfo::Freeze(waiter),
88        SignalPriority::First,
89        true,
90    )
91}
92
93fn send_signal_prio<L>(
94    locked: &mut Locked<L>,
95    task: &Task,
96    mut task_state: TaskWriteGuard<'_>,
97    kernel_siginfo: KernelSignalInfo,
98    prio: SignalPriority,
99    force_wake: bool,
100) -> Result<(), Errno>
101where
102    L: LockBefore<ThreadGroupLimits>,
103{
104    let (siginfo, signal, is_masked, was_masked, is_real_time, sigaction, action) =
105        match kernel_siginfo {
106            KernelSignalInfo::User(ref user_siginfo) => {
107                let signal = user_siginfo.signal;
108                let is_masked = task_state.is_signal_masked(signal);
109                let was_masked = task_state.is_signal_masked_by_saved_mask(signal);
110                let sigaction = task.get_signal_action(signal);
111                let action = action_for_signal(&user_siginfo, sigaction);
112                (
113                    Some(user_siginfo.clone()),
114                    Some(signal),
115                    is_masked,
116                    was_masked,
117                    signal.is_real_time(),
118                    Some(sigaction),
119                    Some(action),
120                )
121            }
122            KernelSignalInfo::Freeze(_) => (None, None, false, false, false, None, None),
123        };
124
125    if is_real_time && prio != SignalPriority::First {
126        if task_state.pending_signal_count()
127            >= task.thread_group().get_rlimit(locked, Resource::SIGPENDING) as usize
128        {
129            return error!(EAGAIN);
130        }
131    }
132
133    // If the signal is ignored then it doesn't need to be queued, except the following 2 cases:
134    //  1. The signal is blocked by the current or the original mask. The signal may be unmasked
135    //     later, see `SigtimedwaitTest.IgnoredUnmaskedSignal` gvisor test.
136    //  2. The task is ptraced. In this case we want to queue the signal for signal-delivery-stop.
137    let is_queued = action.is_none()
138        || action != Some(DeliveryAction::Ignore)
139        || is_masked
140        || was_masked
141        || task_state.is_ptraced();
142    if is_queued {
143        match kernel_siginfo {
144            KernelSignalInfo::User(ref siginfo) => {
145                if prio == SignalPriority::First {
146                    task_state.enqueue_signal_front(siginfo.clone());
147                } else {
148                    task_state.enqueue_signal(siginfo.clone());
149                }
150                task_state.set_flags(TaskFlags::SIGNALS_AVAILABLE, true);
151            }
152            KernelSignalInfo::Freeze(waiter) => {
153                task_state.enqueue_kernel_signal(KernelSignal::Freeze(waiter))
154            }
155        }
156    }
157
158    drop(task_state);
159    if is_queued
160        && !is_masked
161        && action.map_or_else(|| true, |action| action.must_interrupt(sigaction))
162    {
163        // Wake the task. Note that any potential signal handler will be executed before
164        // the task returns from the suspend (from the perspective of user space).
165        task.interrupt();
166    }
167
168    // Unstop the process for SIGCONT. Also unstop for SIGKILL, the only signal that can interrupt
169    // a stopped process.
170    if signal == Some(SIGKILL) {
171        task.write().thaw();
172        task.thread_group().set_stopped(StopState::ForceWaking, siginfo, false);
173        task.write().set_stopped(StopState::ForceWaking, None, None, None);
174    } else if signal == Some(SIGCONT) || force_wake {
175        task.thread_group().set_stopped(StopState::Waking, siginfo, false);
176        task.write().set_stopped(StopState::Waking, None, None, None);
177    }
178
179    Ok(())
180}
181
/// Represents the action to take when signal is delivered.
///
/// See https://man7.org/linux/man-pages/man7/signal.7.html.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DeliveryAction {
    /// Discard the signal without any effect on the task.
    Ignore,
    /// Invoke the user-installed signal handler.
    CallHandler,
    /// Terminate the task.
    Terminate,
    /// Terminate the task and produce a core dump.
    CoreDump,
    /// Stop (suspend) the task's thread group.
    Stop,
    /// Resume a stopped thread group.
    Continue,
}

195impl DeliveryAction {
196    /// Returns whether the target task must be interrupted to execute the action.
197    ///
198    /// The task will not be interrupted if the signal is the action is the Continue action, or if
199    /// the action is Ignore and the user specifically requested to ignore the signal.
200    pub fn must_interrupt(&self, sigaction: Option<sigaction_t>) -> bool {
201        match *self {
202            Self::Continue => false,
203            Self::Ignore => sigaction.map_or(false, |sa| sa.sa_handler == SIG_IGN),
204            _ => true,
205        }
206    }
207}
208
209pub fn action_for_signal(siginfo: &SignalInfo, sigaction: sigaction_t) -> DeliveryAction {
210    let handler = if siginfo.force && sigaction.sa_handler == SIG_IGN {
211        SIG_DFL
212    } else {
213        sigaction.sa_handler
214    };
215    match handler {
216        SIG_DFL => match siginfo.signal {
217            SIGCHLD | SIGURG | SIGWINCH => DeliveryAction::Ignore,
218            sig if sig.is_real_time() => DeliveryAction::Ignore,
219            SIGHUP | SIGINT | SIGKILL | SIGPIPE | SIGALRM | SIGTERM | SIGUSR1 | SIGUSR2
220            | SIGPROF | SIGVTALRM | SIGSTKFLT | SIGIO | SIGPWR => DeliveryAction::Terminate,
221            SIGQUIT | SIGILL | SIGABRT | SIGFPE | SIGSEGV | SIGBUS | SIGSYS | SIGTRAP | SIGXCPU
222            | SIGXFSZ => DeliveryAction::CoreDump,
223            SIGSTOP | SIGTSTP | SIGTTIN | SIGTTOU => DeliveryAction::Stop,
224            SIGCONT => DeliveryAction::Continue,
225            _ => panic!("Unknown signal"),
226        },
227        SIG_IGN => DeliveryAction::Ignore,
228        _ => DeliveryAction::CallHandler,
229    }
230}
231
/// Dequeues and handles a pending signal for `current_task`.
///
/// Bails out early if the task has no pending work or is stopping/stopped,
/// prepares any pending syscall restart, hands the signal to ptrace
/// (signal-delivery-stop) when the task is traced, and otherwise delivers a
/// queued kernel signal (freeze) or user signal.
pub fn dequeue_signal(locked: &mut Locked<Unlocked>, current_task: &mut CurrentTask) {
    // Split the borrow so `task` and `thread_state` can be used independently below.
    let &mut CurrentTask { ref task, ref mut thread_state, .. } = current_task;
    if !task.should_check_for_pending_signals() {
        return;
    }

    let mut task_state = task.write();
    // This code is occasionally executed as the task is stopping. Stopping /
    // stopped threads should not get signals.
    if task.load_stopped().is_stopping_or_stopped() {
        return;
    }

    // If there is a kernel signal that needs handling, deliver it right away;
    // kernel signals take precedence over any queued user signal.
    let kernel_signal = task_state.take_kernel_signal();
    let siginfo = if kernel_signal.is_some() { None } else { task_state.take_any_signal() };
    // A syscall that returned a restart code may need its registers adjusted
    // before a handler runs (or before returning EINTR).
    prepare_to_restart_syscall(
        thread_state,
        siginfo.as_ref().map(|siginfo| task.thread_group().signal_actions.get(siginfo.signal)),
    );

    if let Some(ref siginfo) = siginfo {
        // SIGKILL is never diverted to the tracer.
        if task_state.ptrace_on_signal_consume() && siginfo.signal != SIGKILL {
            // Indicate we will be stopping for ptrace at the next opportunity.
            // Whether you actually deliver the signal is now up to ptrace, so
            // we can return.
            task_state.set_stopped(
                StopState::SignalDeliveryStopping,
                Some(siginfo.clone()),
                None,
                None,
            );
            return;
        }
    }

    // A syscall may have been waiting with a temporary mask which should be used to dequeue the
    // signal, but after the signal has been dequeued the old mask should be restored.
    task_state.restore_signal_mask();
    {
        // Keep SIGNALS_AVAILABLE in sync with the queue, and clear the
        // temporary-mask flag now that the mask has been restored.
        let (clear, set) = if task_state.pending_signal_count() == 0 {
            (TaskFlags::SIGNALS_AVAILABLE, TaskFlags::empty())
        } else {
            (TaskFlags::empty(), TaskFlags::SIGNALS_AVAILABLE)
        };
        task_state.update_flags(clear | TaskFlags::TEMPORARY_SIGNAL_MASK, set);
    };

    if let Some(kernel_signal) = kernel_signal {
        let KernelSignal::Freeze(waiter) = kernel_signal;
        // Release the task lock before blocking on the freeze waiter.
        drop(task_state);

        waiter.freeze(locked, current_task);
    } else if let Some(ref siginfo) = siginfo {
        if let SignalDetail::Timer { timer } = &siginfo.detail {
            // Notify the timer that its signal has been delivered.
            timer.on_signal_delivered();
        }
        if let Some(status) = deliver_signal(
            &task,
            current_task.thread_state.arch_width(),
            task_state,
            siginfo.clone(),
            &mut current_task.thread_state.registers,
            &current_task.thread_state.extended_pstate,
            None,
        ) {
            // The delivery action terminates the task: exit the whole thread group.
            current_task.thread_group_exit(locked, status);
        }
    };
}

/// Applies the delivery action for `siginfo` to `task`.
///
/// `task_state` is consumed; the guard is dropped before any operation that could
/// re-acquire the task's locks (exit, core-dump logging, group stop).
///
/// Returns `Some(ExitStatus)` when the action terminates the task (`Terminate` or
/// `CoreDump`) — the caller is expected to perform the actual exit — and `None`
/// otherwise.
pub fn deliver_signal(
    task: &Task,
    arch_width: ArchWidth,
    mut task_state: TaskWriteGuard<'_>,
    mut siginfo: SignalInfo,
    registers: &mut RegisterState<RegisterStorageEnum>,
    extended_pstate: &ArchExtendedPstateStorage,
    restricted_exception: Option<zx::ExceptionReport>,
) -> Option<ExitStatus> {
    // The loop only repeats (via `continue`) when dispatching a handler fails:
    // the failure is converted into a SIGSEGV delivered on the next iteration.
    loop {
        let sigaction = task.thread_group().signal_actions.get(siginfo.signal);
        let action = action_for_signal(&siginfo, sigaction);
        log_trace!("handling signal {:?} with action {:?}", siginfo, action);
        match action {
            DeliveryAction::Ignore => {}
            DeliveryAction::CallHandler => {
                let sigaction = task.thread_group().signal_actions.get(siginfo.signal);
                let signal = siginfo.signal;
                match dispatch_signal_handler(
                    task,
                    arch_width,
                    registers,
                    extended_pstate,
                    task_state.signals_mut(),
                    siginfo,
                    sigaction,
                ) {
                    Ok(_) => {
                        // Reset the signal handler if `SA_RESETHAND` was set.
                        if sigaction.sa_flags & (SA_RESETHAND as u64) != 0 {
                            let new_sigaction = sigaction_t {
                                sa_handler: SIG_DFL,
                                sa_flags: sigaction.sa_flags & !(SA_RESETHAND as u64),
                                ..sigaction
                            };
                            task.thread_group().signal_actions.set(signal, new_sigaction);
                        }
                    }
                    Err(err) => {
                        log_warn!("failed to deliver signal {:?}: {:?}", signal, err);

                        siginfo = SignalInfo::kernel(SIGSEGV);
                        // The behavior that we want is:
                        //  1. If we failed to send a SIGSEGV, or SIGSEGV is masked, or SIGSEGV is
                        //  ignored, we reset the signal disposition and unmask SIGSEGV.
                        //  2. Send a SIGSEGV to the program, with the (possibly) updated signal
                        //  disposition and mask.
                        let sigaction = task.thread_group().signal_actions.get(siginfo.signal);
                        let action = action_for_signal(&siginfo, sigaction);
                        let masked_signals = task_state.signal_mask();
                        if signal == SIGSEGV
                            || masked_signals.has_signal(SIGSEGV)
                            || action == DeliveryAction::Ignore
                        {
                            task_state.set_signal_mask(masked_signals & !SigSet::from(SIGSEGV));
                            task.thread_group().signal_actions.set(SIGSEGV, sigaction_t::default());
                        }

                        // Try to deliver the SIGSEGV.
                        // We already checked whether we needed to unmask or reset the signal
                        // disposition.
                        // This could not lead to an infinite loop, because if we had a SIGSEGV
                        // handler, and we failed to send a SIGSEGV, we remove the handler and resend
                        // the SIGSEGV.
                        continue;
                    }
                }
            }
            DeliveryAction::Terminate => {
                // Release the signals lock. [`ThreadGroup::exit`] sends signals to threads which
                // will include this one and cause a deadlock re-acquiring the signals lock.
                drop(task_state);
                return Some(ExitStatus::Kill(siginfo));
            }
            DeliveryAction::CoreDump => {
                // Mark the task so the exit path produces a dump, then release the lock
                // before doing any logging / memory-map inspection.
                task_state.set_flags(TaskFlags::DUMP_ON_EXIT, true);
                drop(task_state);
                if let Some(exception) = restricted_exception {
                    log_info!(
                        registers:?=registers,
                        exception:?=exception;
                        "Restricted mode exception caused core dump",
                    );
                    if let SignalDetail::SigFault { addr } = siginfo.detail {
                        if let Ok(mm) = task.mm() {
                            mm.log_memory_map(task, UserAddress::from(addr));
                        }
                    }
                }
                return Some(ExitStatus::CoreDump(siginfo));
            }
            DeliveryAction::Stop => {
                drop(task_state);
                task.thread_group().set_stopped(StopState::GroupStopping, Some(siginfo), false);
            }
            DeliveryAction::Continue => {
                // Nothing to do. Effect already happened when the signal was raised.
            }
        };
        break;
    }
    None
}

/// Prepares the task's register and stack state to execute the signal handler in `action`.
///
/// The state required to restore after the signal handler (saved registers, extended
/// pstate, signal mask, siginfo) is written to the user stack as a `SignalStackFrame`,
/// and `registers` is updated so execution resumes at the handler with the arguments
/// it expects.
///
/// Errors with EINVAL when no valid stack location for the frame can be computed, and
/// propagates failures from building or writing the frame to user memory.
fn dispatch_signal_handler(
    task: &Task,
    arch_width: ArchWidth,
    registers: &mut RegisterState<RegisterStorageEnum>,
    extended_pstate: &ArchExtendedPstateStorage,
    signal_state: &mut SignalState,
    siginfo: SignalInfo,
    action: sigaction_t,
) -> Result<(), Errno> {
    // Skip the red zone below the current stack pointer before placing anything
    // on the main stack. `checked_sub` yields `None` on underflow.
    let main_stack = registers.stack_pointer_register().checked_sub(RED_ZONE_SIZE);
    let stack_bottom = if (action.sa_flags & SA_ONSTACK as u64) != 0 {
        match signal_state.alt_stack {
            Some(sigaltstack) => {
                match main_stack {
                    // Only install the sigaltstack if the stack pointer is not already in it.
                    Some(sp) if sigaltstack_contains_pointer(&sigaltstack, sp) => main_stack,
                    _ => {
                        // Since the stack grows down, the size is added to the ss_sp when
                        // calculating the "bottom" of the stack.
                        // Use the main stack if sigaltstack overflows.
                        sigaltstack
                            .ss_sp
                            .addr
                            .checked_add(sigaltstack.ss_size)
                            .map(|sp| sp as u64)
                            .or(main_stack)
                    }
                }
            }
            None => main_stack,
        }
    } else {
        main_stack
    }
    .ok_or_else(|| errno!(EINVAL))?;

    // Reserve room for the frame below the chosen stack bottom and align the
    // resulting pointer as the architecture requires.
    let stack_pointer =
        align_stack_pointer(stack_bottom.checked_sub(SIG_STACK_SIZE as u64).ok_or_else(|| {
            errno!(
                EINVAL,
                format!(
                    "Subtracting SIG_STACK_SIZE ({}) from stack bottom ({}) overflowed",
                    SIG_STACK_SIZE, stack_bottom
                )
            )
        })?);

    // Check that if the stack pointer is inside altstack, the entire signal stack is inside
    // altstack.
    if let Some(alt_stack) = signal_state.alt_stack {
        if sigaltstack_contains_pointer(&alt_stack, stack_pointer)
            != sigaltstack_contains_pointer(&alt_stack, stack_bottom)
        {
            return error!(EINVAL);
        }
    }

    // Build the frame holding everything needed to resume after the handler.
    let signal_stack_frame = SignalStackFrame::new(
        task,
        arch_width,
        registers,
        extended_pstate,
        signal_state,
        &siginfo,
        action,
        UserAddress::from(stack_pointer),
    )?;

    // Write the signal stack frame at the updated stack pointer.
    task.write_memory(UserAddress::from(stack_pointer), signal_stack_frame.as_bytes())?;

    // Block the signals in `sa_mask` while the handler runs; unless SA_NODEFER is
    // set, also block the signal being delivered.
    let mut mask: SigSet = action.sa_mask.into();
    if action.sa_flags & (SA_NODEFER as u64) == 0 {
        mask = mask | siginfo.signal.into();
    }

    // Preserve the existing mask when handling a nested signal
    signal_state.set_mask(mask | signal_state.mask());

    // Redirect execution to the handler: arg0 is the signal number, and for
    // SA_SIGINFO handlers arg1/arg2 point at the siginfo and context in the frame.
    registers.set_stack_pointer_register(stack_pointer);
    registers.set_arg0_register(siginfo.signal.number() as u64);
    if (action.sa_flags & SA_SIGINFO as u64) != 0 {
        registers.set_arg1_register(
            stack_pointer + memoffset::offset_of!(SignalStackFrame, siginfo_bytes) as u64,
        );
        registers.set_arg2_register(
            stack_pointer + memoffset::offset_of!(SignalStackFrame, context) as u64,
        );
    }
    registers.set_instruction_pointer_register(action.sa_handler.addr);
    registers.reset_flags(); // TODO(https://fxbug.dev/413070731): Verify and update the logic in resetting the flags.

    Ok(())
}

506pub fn restore_from_signal_handler(current_task: &mut CurrentTask) -> Result<(), Errno> {
507    // Read the signal stack frame from memory.
508    let signal_frame_address = UserAddress::from(align_stack_pointer(
509        current_task.thread_state.registers.stack_pointer_register(),
510    ));
511    let signal_stack_bytes =
512        current_task.read_memory_to_array::<SIG_STACK_SIZE>(signal_frame_address)?;
513
514    // Grab the registers state from the stack frame.
515    let signal_stack_frame = SignalStackFrame::from_bytes(signal_stack_bytes);
516    restore_registers(current_task, &signal_stack_frame, signal_frame_address)?;
517
518    // Restore the stored signal mask.
519    current_task
520        .write()
521        .set_signal_mask(signal_stack_frame.get_signal_mask(current_task.is_arch32()));
522
523    Ok(())
524}
525
526/// Maybe adjust a task's registers to restart a syscall once the task switches back to userspace,
527/// based on whether the task previously had a syscall return with an error code indicating that a
528/// restart was required.
529pub fn prepare_to_restart_syscall(
530    thread_state: &mut ThreadState<RegisterStorageEnum>,
531    sigaction: Option<sigaction_t>,
532) {
533    // Taking the value ensures each syscall is only considered for restart once.
534    let Some(err) = thread_state.restart_code.take() else {
535        // Don't interact with register state at all unless other kernel code indicates that we may
536        // need to restart.
537        return;
538    };
539
540    if err.should_restart(sigaction) {
541        // This error code is returned for system calls that need restart_syscall() to adjust time
542        // related arguments when the syscall is restarted. Other syscall restarts can be dispatched
543        // directly to the original syscall implementation.
544        if err == ERESTART_RESTARTBLOCK {
545            thread_state.registers.prepare_for_custom_restart();
546        } else {
547            thread_state.registers.restore_original_return_register();
548        }
549
550        // TODO(https://fxbug.dev/388051291) figure out whether Linux relies on registers here
551        thread_state.registers.rewind_syscall_instruction();
552    } else {
553        thread_state.registers.set_return_register(EINTR.return_value());
554    }
555}
556
557pub fn sys_restart_syscall(
558    locked: &mut Locked<Unlocked>,
559    current_task: &mut CurrentTask,
560) -> Result<SyscallResult, Errno> {
561    match current_task.thread_state.syscall_restart_func.take() {
562        Some(f) => f(locked, current_task),
563        None => {
564            // This may indicate a bug where a syscall returns ERESTART_RESTARTBLOCK without
565            // setting a restart func. But it can also be triggered by userspace, e.g. by directly
566            // calling restart_syscall or injecting an ERESTART_RESTARTBLOCK error through ptrace.
567            log_warn!("restart_syscall called, but nothing to restart");
568            error!(EINTR)
569        }
570    }
571}
572
/// Test utilities for signal handling.
#[cfg(test)]
pub(crate) mod testing {
    use super::*;
    use crate::testing::AutoReleasableTask;
    use std::ops::DerefMut as _;

    /// Runs [`dequeue_signal`] against a test task, unwrapping the
    /// `AutoReleasableTask` wrapper to obtain the `&mut CurrentTask` it needs.
    pub(crate) fn dequeue_signal_for_test(
        locked: &mut Locked<Unlocked>,
        current_task: &mut AutoReleasableTask,
    ) {
        dequeue_signal(locked, current_task.deref_mut());
    }
}