netstack3/main.rs

// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! A networking stack.
#![warn(clippy::unused_async)]
#![warn(missing_docs, unreachable_patterns, unused)]
#![recursion_limit = "256"]

mod bindings;

use std::num::NonZeroU8;

use fidl::endpoints::RequestStream as _;
use fuchsia_async::SendExecutorBuilder;
use fuchsia_component::server::{ServiceFs, ServiceFsDir};
use futures::{Future, StreamExt as _};
use log::{error, info};
use {fidl_fuchsia_process_lifecycle as fprocess_lifecycle, fuchsia_async as fasync};

use bindings::{GlobalConfig, InspectPublisher, InterfaceConfigDefaults, NetstackSeed, Service};

/// Runs Netstack3.
pub fn main() {
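    // Read the structured configuration delivered to the component at startup.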
    let config = ns3_config::Config::take_from_startup_handle();
    let ns3_config::Config {
        num_threads,
        debug_logs,
        opaque_iids,
        suspend_enabled,
        sampled_stats_enabled,
    } = &config;
    let num_threads = NonZeroU8::new(*num_threads).expect("invalid 0 thread count value");
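    // Build a multi-threaded executor sized by the configured thread count.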
    let mut executor = SendExecutorBuilder::new().num_threads(num_threads.get().into()).build();

    let mut log_options = diagnostics_log::PublishOptions::default();

    // NB: netstack3 is usually launched under a 'netstack' moniker, which
    // implies an automatic 'netstack' tag. However, the automatic tag has
    // caused problems when extra tags are present on specific log lines (e.g.
    // https://fxbug.dev/390252317, https://fxbug.dev/390252218). Given that,
    // we always initialize with the 'netstack' tag explicitly here.
    log_options = log_options.tags(&["netstack"]);

    if *debug_logs {
        // When forcing debug logs, disable the logging framework's dynamic
        // interest features; we want logs pegged at Severity::Debug.
        log_options = log_options
            .minimum_severity(diagnostics_log::Severity::Debug)
            .listen_for_interest_updates(false);
    }
    diagnostics_log::initialize(log_options).expect("failed to initialize log");

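    // Register this process as a trace provider so the netstack can emit
    // trace events.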
    fuchsia_trace_provider::trace_provider_create_with_fdio();

    info!("starting netstack3 with {config:?}");

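    // Expose the netstack's FIDL protocols in the component's outgoing `svc`
    // directory.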
    let mut fs = ServiceFs::new();
    let _: &mut ServiceFsDir<'_, _> = fs
        .dir("svc")
        // TODO(https://fxbug.dev/42076541): This is transitional. Once the
        // out-of-stack DHCP client is being used by both netstacks, it
        // should be moved out of the netstack realm and into the network
        // realm. The trip through Netstack3 allows for availability of DHCP
        // client to be dependent on Netstack version when using
        // netstack-proxy.
        .add_proxy_service::<fidl_fuchsia_net_dhcp::ClientProviderMarker, _>()
        .add_fidl_service(Service::Control)
        .add_service_connector(Service::DebugDiagnostics)
        .add_fidl_service(Service::DebugInterfaces)
        .add_fidl_service(Service::DnsServerWatcher)
        .add_fidl_service(Service::FilterControl)
        .add_fidl_service(Service::FilterState)
        .add_fidl_service(Service::HealthCheck)
        .add_fidl_service(Service::Interfaces)
        .add_fidl_service(Service::InterfacesAdmin)
        .add_fidl_service(Service::MulticastAdminV4)
        .add_fidl_service(Service::MulticastAdminV6)
        .add_fidl_service(Service::NdpWatcher)
        .add_fidl_service(Service::Neighbor)
        .add_fidl_service(Service::NeighborController)
        .add_fidl_service(Service::PacketSocket)
        .add_fidl_service(Service::RawSocket)
        .add_fidl_service(Service::RootFilter)
        .add_fidl_service(Service::RootInterfaces)
        .add_fidl_service(Service::RootRoutesV4)
        .add_fidl_service(Service::RootRoutesV6)
        .add_fidl_service(Service::RoutesAdminV4)
        .add_fidl_service(Service::RoutesAdminV6)
        .add_fidl_service(Service::RoutesState)
        .add_fidl_service(Service::RoutesStateV4)
        .add_fidl_service(Service::RoutesStateV6)
        .add_fidl_service(Service::RouteTableProviderV4)
        .add_fidl_service(Service::RouteTableProviderV6)
        .add_fidl_service(Service::RuleTableV4)
        .add_fidl_service(Service::RuleTableV6)
        .add_fidl_service(Service::SettingsControl)
        .add_fidl_service(Service::SettingsState)
        .add_fidl_service(Service::Socket)
        .add_fidl_service(Service::SocketControl)
        .add_fidl_service(Service::SocketDiagnostics)
        .add_fidl_service(Service::Stack)
        .add_fidl_service(Service::WakeGroupProvider);

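    // Seed the bindings with the global configuration and per-interface
    // defaults derived from the startup config.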
    let seed = NetstackSeed::new(
        GlobalConfig {
            suspend_enabled: *suspend_enabled,
            sampled_stats_enabled: *sampled_stats_enabled,
        },
        &InterfaceConfigDefaults { opaque_iids: *opaque_iids },
    );

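    // Publish Inspect data and record the startup configuration under a
    // `Config` node for diagnostics.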
    let inspect_publisher = InspectPublisher::new();
    inspect_publisher
        .inspector()
        .root()
        .record_child("Config", |config_node| config.record_inspect(config_node));

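    // Start serving the outgoing directory on the handle provided by the
    // component framework.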
    let _: &mut ServiceFs<_> = fs.take_and_serve_directory_handle().expect("directory handle");

    // Short circuit when we receive a lifecycle stop request.
    let fs = fs.take_until(get_lifecycle_stop_fut());

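    // Run the netstack's main future to completion; it finishes once the
    // service stream ends (e.g. after a lifecycle stop request).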
    executor.run(seed.serve(fs, inspect_publisher))
}

/// Takes the lifecycle handle from startup and returns a future that resolves
/// when the system has requested shutdown.
fn get_lifecycle_stop_fut() -> impl Future<Output = ()> {
    // The lifecycle handle takes no arguments; its argument index must be
    // zero. See zircon/processargs.h.
    const LIFECYCLE_HANDLE_ARG: u16 = 0;
    let handle = fuchsia_runtime::take_startup_handle(fuchsia_runtime::HandleInfo::new(
        fuchsia_runtime::HandleType::Lifecycle,
        LIFECYCLE_HANDLE_ARG,
    ))
    .expect("missing lifecycle handle");

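    // The returned future owns the lifecycle channel; it resolves once a Stop
    // request is observed.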
    async move {
        let mut request_stream = fprocess_lifecycle::LifecycleRequestStream::from_channel(
            fasync::Channel::from_channel(handle.into()).into(),
        );
        loop {
            match request_stream.next().await {
                Some(Ok(fprocess_lifecycle::LifecycleRequest::Stop { control_handle })) => {
                    info!("received shutdown request");
                    // Shutdown request is acknowledged by the lifecycle
                    // channel shutting down. Intentionally leak the channel
                    // so it'll only be closed on process termination,
                    // allowing clean process termination to always be
                    // observed.

                    // Must drop the control_handle to unwrap the
                    // lifecycle channel.
                    std::mem::drop(control_handle);
                    let (inner, _terminated): (_, bool) = request_stream.into_inner();
                    let inner = std::sync::Arc::try_unwrap(inner)
                        .expect("failed to retrieve lifecycle channel");
                    let inner: zx::Channel = inner.into_channel().into_zx_channel();
                    std::mem::forget(inner);
                    break;
                }
                Some(Err(e)) => error!("observed error in lifecycle request stream: {e:?}"),
                None => {
                    // Something really bad must've happened here. We choose
                    // not to panic because the system is already in a bad
                    // state; log an error and hold forever instead.
                    error!("lifecycle channel closed");
                    futures::future::pending::<()>().await;
                }
            }
        }
    }
}