starnix_core/signals/signal_handling.rs

// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use crate::arch::signal_handling::{
    RED_ZONE_SIZE, SIG_STACK_SIZE, SignalStackFrame, align_stack_pointer, restore_registers,
};
use crate::mm::{MemoryAccessor, MemoryAccessorExt};
use crate::signals::{KernelSignal, KernelSignalInfo, SignalDetail, SignalInfo, SignalState};
use crate::task::{
    CurrentTask, ExitStatus, StopState, Task, TaskFlags, TaskWriteGuard, ThreadState, Waiter,
};
use extended_pstate::ExtendedPstateState;
use starnix_logging::{log_info, log_trace, log_warn};
use starnix_registers::RegisterState;
use starnix_sync::{LockBefore, Locked, ThreadGroupLimits, Unlocked};
use starnix_syscalls::SyscallResult;
use starnix_types::arch::ArchWidth;
use starnix_uapi::errors::{EINTR, ERESTART_RESTARTBLOCK, Errno};
use starnix_uapi::resource_limits::Resource;
use starnix_uapi::signals::{
    SIGABRT, SIGALRM, SIGBUS, SIGCHLD, SIGCONT, SIGFPE, SIGHUP, SIGILL, SIGINT, SIGIO, SIGKILL,
    SIGPIPE, SIGPROF, SIGPWR, SIGQUIT, SIGSEGV, SIGSTKFLT, SIGSTOP, SIGSYS, SIGTERM, SIGTRAP,
    SIGTSTP, SIGTTIN, SIGTTOU, SIGURG, SIGUSR1, SIGUSR2, SIGVTALRM, SIGWINCH, SIGXCPU, SIGXFSZ,
    SigSet, sigaltstack_contains_pointer,
};
use starnix_uapi::user_address::UserAddress;
use starnix_uapi::{
    SA_NODEFER, SA_ONSTACK, SA_RESETHAND, SA_SIGINFO, SIG_DFL, SIG_IGN, errno, error, sigaction_t,
};

/// Indicates where in the signal queue a signal should go.  Signals
/// can jump the queue when being injected by tools like ptrace.
#[derive(PartialEq)]
enum SignalPriority {
    First,
    Last,
}

// `send_signal*()` calls below may fail only for real-time signals (with EAGAIN). They are
// expected to succeed for all other signals.
pub fn send_signal_first<L>(
    locked: &mut Locked<L>,
    task: &Task,
    task_state: TaskWriteGuard<'_>,
    siginfo: SignalInfo,
) where
    L: LockBefore<ThreadGroupLimits>,
{
    send_signal_prio(locked, task, task_state, siginfo.into(), SignalPriority::First, true)
        .expect("send_signal(SignalPriority::First) is not expected to fail")
}

// Sends the signal in `siginfo` to `task`. The signal must be a standard (i.e. not real-time) signal.
pub fn send_standard_signal<L>(locked: &mut Locked<L>, task: &Task, siginfo: SignalInfo)
where
    L: LockBefore<ThreadGroupLimits>,
{
    debug_assert!(!siginfo.signal.is_real_time());
    let state = task.write();
    send_signal_prio(locked, task, state, siginfo.into(), SignalPriority::Last, false)
        .expect("send_signal(SignalPriority::Last) is not expected to fail for standard signals.")
}

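/// Sends the signal in `siginfo` to `task` at normal queue priority. Unlike
/// `send_standard_signal`, this may fail with EAGAIN when queueing a real-time signal would
/// exceed the task's RLIMIT_SIGPENDING limit.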
pub fn send_signal<L>(locked: &mut Locked<L>, task: &Task, siginfo: SignalInfo) -> Result<(), Errno>
where
    L: LockBefore<ThreadGroupLimits>,
{
    let state = task.write();
    send_signal_prio(locked, task, state, siginfo.into(), SignalPriority::Last, false)
}

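/// Sends a kernel-internal freeze signal to `task` and wakes it. The signal is handled in
/// `dequeue_signal`, where the task freezes on `waiter`.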
pub fn send_freeze_signal<L>(
    locked: &mut Locked<L>,
    task: &Task,
    waiter: Waiter,
) -> Result<(), Errno>
where
    L: LockBefore<ThreadGroupLimits>,
{
    let state = task.write();
    send_signal_prio(
        locked,
        task,
        state,
        KernelSignalInfo::Freeze(waiter),
        SignalPriority::First,
        true,
    )
}

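/// Common implementation behind the `send_signal*()` entry points: queues `kernel_siginfo` on
/// `task` (unless it is ignored and can be dropped), interrupts the task when the pending signal
/// requires it, and wakes stopped tasks for SIGKILL, SIGCONT and forced wakes.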
fn send_signal_prio<L>(
    locked: &mut Locked<L>,
    task: &Task,
    mut task_state: TaskWriteGuard<'_>,
    kernel_siginfo: KernelSignalInfo,
    prio: SignalPriority,
    force_wake: bool,
) -> Result<(), Errno>
where
    L: LockBefore<ThreadGroupLimits>,
{
    let (siginfo, signal, is_masked, was_masked, is_real_time, sigaction, action) =
        match kernel_siginfo {
            KernelSignalInfo::User(ref user_siginfo) => {
                let signal = user_siginfo.signal;
                let is_masked = task_state.is_signal_masked(signal);
                let was_masked = task_state.is_signal_masked_by_saved_mask(signal);
                let sigaction = task.get_signal_action(signal);
                let action = action_for_signal(&user_siginfo, sigaction);
                (
                    Some(user_siginfo.clone()),
                    Some(signal),
                    is_masked,
                    was_masked,
                    signal.is_real_time(),
                    Some(sigaction),
                    Some(action),
                )
            }
            KernelSignalInfo::Freeze(_) => (None, None, false, false, false, None, None),
        };

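    // Real-time signals queued at normal priority count against the task's RLIMIT_SIGPENDING
    // limit; standard signals and signals injected at `SignalPriority::First` are not limited.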
    if is_real_time && prio != SignalPriority::First {
        if task_state.pending_signal_count()
            >= task.thread_group().get_rlimit(locked, Resource::SIGPENDING) as usize
        {
            return error!(EAGAIN);
        }
    }

    // If the signal is ignored then it doesn't need to be queued, except in the following two
    // cases:
    //  1. The signal is blocked by the current or the saved mask. It may be unmasked later; see
    //     the `SigtimedwaitTest.IgnoredUnmaskedSignal` gVisor test.
    //  2. The task is ptraced. In this case we want to queue the signal for signal-delivery-stop.
    let is_queued = action.is_none()
        || action != Some(DeliveryAction::Ignore)
        || is_masked
        || was_masked
        || task_state.is_ptraced();
    if is_queued {
        match kernel_siginfo {
            KernelSignalInfo::User(ref siginfo) => {
                if prio == SignalPriority::First {
                    task_state.enqueue_signal_front(siginfo.clone());
                } else {
                    task_state.enqueue_signal(siginfo.clone());
                }
                task_state.set_flags(TaskFlags::SIGNALS_AVAILABLE, true);
            }
            KernelSignalInfo::Freeze(waiter) => {
                task_state.enqueue_kernel_signal(KernelSignal::Freeze(waiter))
            }
        }
    }

    if action == Some(DeliveryAction::CoreDump) && !task_state.is_ptraced() {
        // If the signal triggers a core dump, request a backtrace to get information about the sender.
        // TODO(https://fxbug.dev/356732164) collect a backtrace ourselves.
        log_info!(
            "Sending a core dump signal {signal:?} to task {}. Take a backtrace of the sender for debugging",
            task.tid
        );
        debug::backtrace_request_current_thread();
    }

    drop(task_state);
    if is_queued
        && !is_masked
        && action.map_or_else(|| true, |action| action.must_interrupt(sigaction))
    {
        // Wake the task. Note that any potential signal handler will be executed before
        // the task returns from the suspend (from the perspective of user space).
        task.interrupt();
    }

    // Unstop the process for SIGCONT. Also unstop for SIGKILL, the only signal that can interrupt
    // a stopped process.
    if signal == Some(SIGKILL) {
        task.write().thaw();
        task.thread_group().set_stopped(StopState::ForceWaking, siginfo, false);
        task.write().set_stopped(StopState::ForceWaking, None, None, None);
    } else if signal == Some(SIGCONT) || force_wake {
        task.thread_group().set_stopped(StopState::Waking, siginfo, false);
        task.write().set_stopped(StopState::Waking, None, None, None);
    }

    Ok(())
}

/// Represents the action to take when a signal is delivered.
///
/// See https://man7.org/linux/man-pages/man7/signal.7.html.
#[derive(Debug, PartialEq)]
pub enum DeliveryAction {
    Ignore,
    CallHandler,
    Terminate,
    CoreDump,
    Stop,
    Continue,
}

impl DeliveryAction {
    /// Returns whether the target task must be interrupted to execute the action.
    ///
    /// The task will not be interrupted if the action is the Continue action, or if the action
    /// is Ignore and the user specifically requested to ignore the signal.
    pub fn must_interrupt(&self, sigaction: Option<sigaction_t>) -> bool {
        match *self {
            Self::Continue => false,
            Self::Ignore => sigaction.map_or(false, |sa| sa.sa_handler == SIG_IGN),
            _ => true,
        }
    }
}

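/// Returns the [`DeliveryAction`] implied by `sigaction`'s handler for `siginfo`. A forced
/// signal (`siginfo.force`) whose handler is SIG_IGN falls back to the default action.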
pub fn action_for_signal(siginfo: &SignalInfo, sigaction: sigaction_t) -> DeliveryAction {
    let handler = if siginfo.force && sigaction.sa_handler == SIG_IGN {
        SIG_DFL
    } else {
        sigaction.sa_handler
    };
    match handler {
        SIG_DFL => match siginfo.signal {
            SIGCHLD | SIGURG | SIGWINCH => DeliveryAction::Ignore,
            sig if sig.is_real_time() => DeliveryAction::Ignore,
            SIGHUP | SIGINT | SIGKILL | SIGPIPE | SIGALRM | SIGTERM | SIGUSR1 | SIGUSR2
            | SIGPROF | SIGVTALRM | SIGSTKFLT | SIGIO | SIGPWR => DeliveryAction::Terminate,
            SIGQUIT | SIGILL | SIGABRT | SIGFPE | SIGSEGV | SIGBUS | SIGSYS | SIGTRAP | SIGXCPU
            | SIGXFSZ => DeliveryAction::CoreDump,
            SIGSTOP | SIGTSTP | SIGTTIN | SIGTTOU => DeliveryAction::Stop,
            SIGCONT => DeliveryAction::Continue,
            _ => panic!("Unknown signal"),
        },
        SIG_IGN => DeliveryAction::Ignore,
        _ => DeliveryAction::CallHandler,
    }
}

/// Dequeues and handles a pending signal for `current_task`.
pub fn dequeue_signal(locked: &mut Locked<Unlocked>, current_task: &mut CurrentTask) {
    let &mut CurrentTask { ref task, ref mut thread_state, .. } = current_task;
    let mut task_state = task.write();
    // This code is occasionally executed as the task is stopping. Stopping /
    // stopped threads should not get signals.
    if task.load_stopped().is_stopping_or_stopped() {
        return;
    }

    // If there is a kernel signal that needs handling, deliver it right away.
    let kernel_signal = task_state.take_kernel_signal();
    let siginfo = if kernel_signal.is_some() { None } else { task_state.take_any_signal() };
    prepare_to_restart_syscall(
        thread_state,
        siginfo.as_ref().map(|siginfo| task.thread_group().signal_actions.get(siginfo.signal)),
    );

    if let Some(ref siginfo) = siginfo {
        if task_state.ptrace_on_signal_consume() && siginfo.signal != SIGKILL {
            // Indicate we will be stopping for ptrace at the next opportunity.
            // Whether the signal is actually delivered is now up to ptrace, so
            // we can return.
            task_state.set_stopped(
                StopState::SignalDeliveryStopping,
                Some(siginfo.clone()),
                None,
                None,
            );
            return;
        }
    }

    // A syscall may have been waiting with a temporary mask which should be used to dequeue the
    // signal, but after the signal has been dequeued the old mask should be restored.
    task_state.restore_signal_mask();
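    // Recompute SIGNALS_AVAILABLE to reflect whether any signals remain queued, and clear the
    // temporary-mask flag now that the saved mask has been restored.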
    {
        let (clear, set) = if task_state.pending_signal_count() == 0 {
            (TaskFlags::SIGNALS_AVAILABLE, TaskFlags::empty())
        } else {
            (TaskFlags::empty(), TaskFlags::SIGNALS_AVAILABLE)
        };
        task_state.update_flags(clear | TaskFlags::TEMPORARY_SIGNAL_MASK, set);
    };

    if let Some(kernel_signal) = kernel_signal {
        let KernelSignal::Freeze(waiter) = kernel_signal;
        drop(task_state);

        waiter.freeze(locked, current_task);
    } else if let Some(ref siginfo) = siginfo {
        if let SignalDetail::Timer { timer } = &siginfo.detail {
            timer.on_signal_delivered();
        }
        if let Some(status) = deliver_signal(
            &task,
            current_task.thread_state.arch_width,
            task_state,
            siginfo.clone(),
            &mut current_task.thread_state.registers,
            &current_task.thread_state.extended_pstate,
            None,
        ) {
            current_task.thread_group_exit(locked, status);
        }
    };
}

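/// Applies the delivery action for `siginfo` to `task`: dispatches to the registered handler,
/// stops the thread group, or ignores the signal. Returns `Some(ExitStatus)` when the action
/// terminates the task (Terminate or CoreDump); the caller is expected to perform the
/// thread-group exit.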
pub fn deliver_signal(
    task: &Task,
    arch_width: ArchWidth,
    mut task_state: TaskWriteGuard<'_>,
    mut siginfo: SignalInfo,
    registers: &mut RegisterState,
    extended_pstate: &ExtendedPstateState,
    restricted_exception: Option<zx::ExceptionReport>,
) -> Option<ExitStatus> {
    loop {
        let sigaction = task.thread_group().signal_actions.get(siginfo.signal);
        let action = action_for_signal(&siginfo, sigaction);
        log_trace!("handling signal {:?} with action {:?}", siginfo, action);
        match action {
            DeliveryAction::Ignore => {}
            DeliveryAction::CallHandler => {
                let sigaction = task.thread_group().signal_actions.get(siginfo.signal);
                let signal = siginfo.signal;
                match dispatch_signal_handler(
                    task,
                    arch_width,
                    registers,
                    extended_pstate,
                    task_state.signals_mut(),
                    siginfo,
                    sigaction,
                ) {
                    Ok(_) => {
                        // Reset the signal handler if `SA_RESETHAND` was set.
                        if sigaction.sa_flags & (SA_RESETHAND as u64) != 0 {
                            let new_sigaction = sigaction_t {
                                sa_handler: SIG_DFL,
                                sa_flags: sigaction.sa_flags & !(SA_RESETHAND as u64),
                                ..sigaction
                            };
                            task.thread_group().signal_actions.set(signal, new_sigaction);
                        }
                    }
                    Err(err) => {
                        log_warn!("failed to deliver signal {:?}: {:?}", signal, err);

                        siginfo = SignalInfo::default(SIGSEGV);
                        // The behavior that we want is:
                        //  1. If we failed to send a SIGSEGV, or SIGSEGV is masked, or SIGSEGV is
                        //  ignored, we reset the signal disposition and unmask SIGSEGV.
                        //  2. Send a SIGSEGV to the program, with the (possibly) updated signal
                        //  disposition and mask.
                        let sigaction = task.thread_group().signal_actions.get(siginfo.signal);
                        let action = action_for_signal(&siginfo, sigaction);
                        let masked_signals = task_state.signal_mask();
                        if signal == SIGSEGV
                            || masked_signals.has_signal(SIGSEGV)
                            || action == DeliveryAction::Ignore
                        {
                            task_state.set_signal_mask(masked_signals & !SigSet::from(SIGSEGV));
                            task.thread_group().signal_actions.set(SIGSEGV, sigaction_t::default());
                        }

                        // Try to deliver the SIGSEGV.
                        // We already checked above whether we needed to unmask the signal or
                        // reset its disposition.
                        // This cannot lead to an infinite loop: if delivery to a SIGSEGV handler
                        // fails, the handler is reset to the default disposition before the
                        // SIGSEGV is resent.
                        continue;
                    }
                }
            }
            DeliveryAction::Terminate => {
                // Release the signals lock. [`ThreadGroup::exit`] sends signals to its threads,
                // including this one, which would deadlock when re-acquiring the signals lock.
                drop(task_state);
                return Some(ExitStatus::Kill(siginfo));
            }
            DeliveryAction::CoreDump => {
                task_state.set_flags(TaskFlags::DUMP_ON_EXIT, true);
                drop(task_state);
                if let Some(exception) = restricted_exception {
                    log_info!(
                        registers:?=registers,
                        exception:?=exception;
                        "Restricted mode exception caused core dump",
                    );
                }
                return Some(ExitStatus::CoreDump(siginfo));
            }
            DeliveryAction::Stop => {
                drop(task_state);
                task.thread_group().set_stopped(StopState::GroupStopping, Some(siginfo), false);
            }
            DeliveryAction::Continue => {
                // Nothing to do. Effect already happened when the signal was raised.
            }
        };
        break;
    }
    None
}

/// Prepares the task's register and signal state to execute the signal handler stored in
/// `action`.
///
/// The state required to resume execution after the handler returns is saved on the signal
/// stack.
fn dispatch_signal_handler(
    task: &Task,
    arch_width: ArchWidth,
    registers: &mut RegisterState,
    extended_pstate: &ExtendedPstateState,
    signal_state: &mut SignalState,
    siginfo: SignalInfo,
    action: sigaction_t,
) -> Result<(), Errno> {
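    // Pick where the signal frame will live: below the red zone on the current stack, or on the
    // alternate signal stack when SA_ONSTACK is set and the task is not already running on it.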
    let main_stack = registers.stack_pointer_register().checked_sub(RED_ZONE_SIZE);
    let stack_bottom = if (action.sa_flags & SA_ONSTACK as u64) != 0 {
        match signal_state.alt_stack {
            Some(sigaltstack) => {
                match main_stack {
                    // Only install the sigaltstack if the stack pointer is not already in it.
                    Some(sp) if sigaltstack_contains_pointer(&sigaltstack, sp) => main_stack,
                    _ => {
                        // Since the stack grows down, the size is added to ss_sp when calculating
                        // the "bottom" of the stack. Fall back to the main stack if this addition
                        // overflows.
                        sigaltstack
                            .ss_sp
                            .addr
                            .checked_add(sigaltstack.ss_size)
                            .map(|sp| sp as u64)
                            .or(main_stack)
                    }
                }
            }
            None => main_stack,
        }
    } else {
        main_stack
    }
    .ok_or_else(|| errno!(EINVAL))?;

    let stack_pointer =
        align_stack_pointer(stack_bottom.checked_sub(SIG_STACK_SIZE as u64).ok_or_else(|| {
            errno!(
                EINVAL,
                format!(
                    "Subtracting SIG_STACK_SIZE ({}) from stack bottom ({}) overflowed",
                    SIG_STACK_SIZE, stack_bottom
                )
            )
        })?);

    // Check that if the stack pointer is inside altstack, the entire signal stack is inside
    // altstack.
    if let Some(alt_stack) = signal_state.alt_stack {
        if sigaltstack_contains_pointer(&alt_stack, stack_pointer)
            != sigaltstack_contains_pointer(&alt_stack, stack_bottom)
        {
            return error!(EINVAL);
        }
    }

    let signal_stack_frame = SignalStackFrame::new(
        task,
        arch_width,
        registers,
        extended_pstate,
        signal_state,
        &siginfo,
        action,
        UserAddress::from(stack_pointer),
    )?;

    // Write the signal stack frame at the updated stack pointer.
    task.write_memory(UserAddress::from(stack_pointer), signal_stack_frame.as_bytes())?;

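    // Compute the mask to apply while the handler runs: the handler's sa_mask, plus the delivered
    // signal itself unless SA_NODEFER was requested.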
    let mut mask: SigSet = action.sa_mask.into();
    if action.sa_flags & (SA_NODEFER as u64) == 0 {
        mask = mask | siginfo.signal.into();
    }

    // Preserve the existing mask when handling a nested signal.
    signal_state.set_mask(mask | signal_state.mask());

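    // Redirect execution to the handler: the stack pointer is moved to the frame, arg0 carries
    // the signal number, and for SA_SIGINFO handlers arg1/arg2 point at the siginfo and context
    // stored in the frame.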
    registers.set_stack_pointer_register(stack_pointer);
    registers.set_arg0_register(siginfo.signal.number() as u64);
    if (action.sa_flags & SA_SIGINFO as u64) != 0 {
        registers.set_arg1_register(
            stack_pointer + memoffset::offset_of!(SignalStackFrame, siginfo_bytes) as u64,
        );
        registers.set_arg2_register(
            stack_pointer + memoffset::offset_of!(SignalStackFrame, context) as u64,
        );
    }
    registers.set_instruction_pointer_register(action.sa_handler.addr);
    registers.reset_flags(); // TODO(https://fxbug.dev/413070731): Verify and update the logic in resetting the flags.

    Ok(())
}

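/// Unwinds the signal stack frame written by `dispatch_signal_handler`: reads the frame at the
/// current stack pointer, restores the saved register state, and restores the saved signal mask.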
pub fn restore_from_signal_handler(current_task: &mut CurrentTask) -> Result<(), Errno> {
    // Read the signal stack frame from memory.
    let signal_frame_address = UserAddress::from(align_stack_pointer(
        current_task.thread_state.registers.stack_pointer_register(),
    ));
    let signal_stack_bytes =
        current_task.read_memory_to_array::<SIG_STACK_SIZE>(signal_frame_address)?;

    // Grab the register state from the stack frame.
    let signal_stack_frame = SignalStackFrame::from_bytes(signal_stack_bytes);
    restore_registers(current_task, &signal_stack_frame, signal_frame_address)?;

    // Restore the stored signal mask.
    current_task.write().set_signal_mask(signal_stack_frame.get_signal_mask());

    Ok(())
}

/// Maybe adjust a task's registers to restart a syscall once the task switches back to userspace,
/// based on whether the task previously had a syscall return with an error code indicating that a
/// restart was required.
pub fn prepare_to_restart_syscall(thread_state: &mut ThreadState, sigaction: Option<sigaction_t>) {
    // Taking the value ensures each syscall is only considered for restart once.
    let Some(err) = thread_state.restart_code.take() else {
        // Don't interact with register state at all unless other kernel code indicates that we may
        // need to restart.
        return;
    };

    if err.should_restart(sigaction) {
        // This error code is returned for system calls that need restart_syscall() to adjust time
        // related arguments when the syscall is restarted. Other syscall restarts can be dispatched
        // directly to the original syscall implementation.
        if err == ERESTART_RESTARTBLOCK {
            thread_state.registers.prepare_for_custom_restart();
        } else {
            thread_state.registers.restore_original_return_register();
        }

        // TODO(https://fxbug.dev/388051291) figure out whether Linux relies on registers here
        thread_state.registers.rewind_syscall_instruction();
    } else {
        thread_state.registers.set_return_register(EINTR.return_value());
    }
}

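/// Implements the `restart_syscall` syscall: re-invokes the restart function stashed by a syscall
/// that previously returned `ERESTART_RESTARTBLOCK`, or fails with EINTR if there is nothing to
/// restart.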
pub fn sys_restart_syscall(
    locked: &mut Locked<Unlocked>,
    current_task: &mut CurrentTask,
) -> Result<SyscallResult, Errno> {
    match current_task.thread_state.syscall_restart_func.take() {
        Some(f) => f(locked, current_task),
        None => {
            // This may indicate a bug where a syscall returns ERESTART_RESTARTBLOCK without
            // setting a restart func. But it can also be triggered by userspace, e.g. by directly
            // calling restart_syscall or injecting an ERESTART_RESTARTBLOCK error through ptrace.
            log_warn!("restart_syscall called, but nothing to restart");
            error!(EINTR)
        }
    }
}

/// Test utilities for signal handling.
#[cfg(test)]
pub(crate) mod testing {
    use super::*;
    use crate::testing::AutoReleasableTask;
    use std::ops::DerefMut as _;

    pub(crate) fn dequeue_signal_for_test(
        locked: &mut Locked<Unlocked>,
        current_task: &mut AutoReleasableTask,
    ) {
        dequeue_signal(locked, current_task.deref_mut());
    }
}