pub mod kernels;
pub mod pager;
pub mod proxy;
pub mod suspend;

use crate::pager::run_pager;
use anyhow::{Error, anyhow};
use fidl::endpoints::{DiscoverableProtocolMarker, Proxy, ServerEnd};
use fidl::{HandleBased, Peered};
use fuchsia_component::client as fclient;
use fuchsia_sync::Mutex;
use futures::TryStreamExt;
use kernels::Kernels;
use log::warn;
use proxy::ChannelProxy;
use rand::Rng;
use std::future::Future;
use std::sync::Arc;
use suspend::{
    ASLEEP_SIGNAL, AWAKE_SIGNAL, SuspendContext, WakeSource, WakeSources, suspend_container,
};
use {
    fidl_fuchsia_component as fcomponent, fidl_fuchsia_component_decl as fdecl,
    fidl_fuchsia_component_runner as frunner, fidl_fuchsia_io as fio,
    fidl_fuchsia_starnix_container as fstarnix, fidl_fuchsia_starnix_runner as fstarnixrunner,
    fuchsia_async as fasync,
};

/// The name of the collection in which starnix kernel component instances are created.
const KERNEL_COLLECTION: &str = "kernels";

/// The name of the protocol, exposed by the starnix kernel, that is used to run containers.
const CONTAINER_RUNNER_PROTOCOL: &str = "fuchsia.starnix.container.Runner";

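/// A `StarnixKernel` tracks a single `starnix_kernel` component instance running in the
/// `kernels` collection, along with the handles needed to manage the container it runs.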
#[allow(dead_code)]
pub struct StarnixKernel {
    /// The name of the kernel component instance within the `kernels` collection.
    name: String,

    /// The directory exposed by the kernel component, used to connect to the protocols it
    /// offers.
    exposed_dir: fio::DirectoryProxy,

    /// A duplicate of the `component_instance` token from the container's `ComponentStartInfo`.
    component_instance: zx::Event,

    /// The job in which the container runs, as reported by the kernel's
    /// `fuchsia.starnix.container.Controller`.
    job: Arc<zx::Job>,

    /// The wake lease currently held on behalf of the container, if any.
    wake_lease: Mutex<Option<zx::EventPair>>,
}

impl StarnixKernel {
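    /// Creates a new `starnix_kernel` component instance in the `kernels` collection and asks it
    /// to run the container described by `start_info`.
    ///
    /// Returns the new `StarnixKernel` along with a future that completes when the kernel
    /// component's controller channel closes.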
    pub async fn create(
        realm: fcomponent::RealmProxy,
        kernel_url: &str,
        start_info: frunner::ComponentStartInfo,
        controller: ServerEnd<frunner::ComponentControllerMarker>,
    ) -> Result<(Self, impl Future<Output = ()>), Error> {
        let kernel_name = generate_kernel_name(&start_info)?;
        let component_instance = start_info
            .component_instance
            .as_ref()
            .ok_or_else(|| {
                anyhow::anyhow!("expected to find component_instance in ComponentStartInfo")
            })?
            .duplicate_handle(zx::Rights::SAME_RIGHTS)?;

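        // Create the kernel component instance, keeping its controller so we can observe when it
        // stops.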
        let (controller_proxy, controller_server_end) = fidl::endpoints::create_proxy();
        realm
            .create_child(
                &fdecl::CollectionRef { name: KERNEL_COLLECTION.into() },
                &fdecl::Child {
                    name: Some(kernel_name.clone()),
                    url: Some(kernel_url.to_string()),
                    startup: Some(fdecl::StartupMode::Lazy),
                    ..Default::default()
                },
                fcomponent::CreateChildArgs {
                    controller: Some(controller_server_end),
                    ..Default::default()
                },
            )
            .await?
            .map_err(|e| anyhow::anyhow!("failed to create kernel: {:?}", e))?;

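        // Connect to the protocol the kernel exposes for running containers.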
        let exposed_dir = open_exposed_directory(&realm, &kernel_name, KERNEL_COLLECTION).await?;
        let container_runner = fclient::connect_to_named_protocol_at_dir_root::<
            frunner::ComponentRunnerMarker,
        >(&exposed_dir, CONTAINER_RUNNER_PROTOCOL)?;

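        // Ask the kernel to run the container described by `start_info`.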
        container_runner.start(start_info, controller)?;

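        // Ask the kernel which job the container will run in.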
        let container_controller =
            fclient::connect_to_protocol_at_dir_root::<fstarnix::ControllerMarker>(&exposed_dir)?;
        let fstarnix::ControllerGetJobHandleResponse { job, .. } =
            container_controller.get_job_handle().await?;
        let Some(job) = job else {
            anyhow::bail!("expected to find job in ControllerGetJobHandleResponse");
        };

        let kernel = Self {
            name: kernel_name,
            exposed_dir,
            component_instance,
            job: Arc::new(job),
            wake_lease: Default::default(),
        };
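        // The returned future completes when the kernel component's controller channel closes.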
        let on_stop = async move {
            _ = controller_proxy.into_channel().unwrap().on_closed().await;
        };
        Ok((kernel, on_stop))
    }

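    /// Returns the token representing the container's component instance.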
    pub fn component_instance(&self) -> &zx::Event {
        &self.component_instance
    }

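    /// Returns the job in which the container runs.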
    pub fn job(&self) -> &Arc<zx::Job> {
        &self.job
    }

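    /// Connects to the discoverable protocol `P` exposed by the kernel.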
    pub fn connect_to_protocol<P: DiscoverableProtocolMarker>(&self) -> Result<P::Proxy, Error> {
        fclient::connect_to_protocol_at_dir_root::<P>(&self.exposed_dir)
    }
}

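/// Generates a random, 7-character alphanumeric name for a new kernel instance.
///
/// The `ComponentStartInfo` is currently unused.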
fn generate_kernel_name(_start_info: &frunner::ComponentStartInfo) -> Result<String, Error> {
    let random_id: String =
        rand::rng().sample_iter(&rand::distr::Alphanumeric).take(7).map(char::from).collect();
    Ok(random_id)
}

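/// Opens the exposed directory of the child `child_name` in the collection `collection_name`.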
async fn open_exposed_directory(
    realm: &fcomponent::RealmProxy,
    child_name: &str,
    collection_name: &str,
) -> Result<fio::DirectoryProxy, Error> {
    let (directory_proxy, server_end) = fidl::endpoints::create_proxy::<fio::DirectoryMarker>();
    realm
        .open_exposed_dir(
            &fdecl::ChildRef { name: child_name.into(), collection: Some(collection_name.into()) },
            server_end,
        )
        .await?
        .map_err(|e| {
            anyhow!(
                "failed to bind to child {} in collection {:?}: {:?}",
                child_name,
                collection_name,
                e
            )
        })?;
    Ok(directory_proxy)
}

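/// Serves a `fuchsia.starnix.runner.Manager` request stream, handling container suspension,
/// wake source registration, wake channel proxying, and pager creation requests.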
pub async fn serve_starnix_manager(
    mut stream: fstarnixrunner::ManagerRequestStream,
    suspend_context: Arc<SuspendContext>,
    kernels: &Kernels,
    sender: &async_channel::Sender<(ChannelProxy, Arc<Mutex<WakeSources>>)>,
) -> Result<(), Error> {
    while let Some(event) = stream.try_next().await? {
        match event {
            fstarnixrunner::ManagerRequest::SuspendContainer { payload, responder, .. } => {
                let response = suspend_container(payload, &suspend_context, kernels).await?;
                if let Err(e) = match response {
                    Ok(o) => responder.send(Ok(&o)),
                    Err(e) => responder.send(Err(e)),
                } {
                    warn!("error replying to suspend request: {e}");
                }
            }
            fstarnixrunner::ManagerRequest::ProxyWakeChannel { payload, .. } => {
                let fstarnixrunner::ManagerProxyWakeChannelRequest {
                    container_job: Some(_container_job),
                    remote_channel: Some(remote_channel),
                    container_channel: Some(container_channel),
                    name: Some(name),
                    counter: Some(message_counter),
                    ..
                } = payload
                else {
                    continue;
                };

                // Track the channel's message counter as a wake source, keyed by its koid.
                suspend_context.wake_sources.lock().insert(
                    message_counter.get_koid().unwrap(),
                    WakeSource::from_counter(
                        message_counter.duplicate_handle(zx::Rights::SAME_RIGHTS)?,
                        name.clone(),
                    ),
                );

                let proxy =
                    ChannelProxy { container_channel, remote_channel, message_counter, name };

                // Hand the proxy off to be run, along with the shared wake source map.
                sender.try_send((proxy, suspend_context.wake_sources.clone())).unwrap();
            }
            fstarnixrunner::ManagerRequest::RegisterWakeWatcher { payload, responder } => {
                if let Some(watcher) = payload.watcher {
                    // Signal the new watcher as awake: clear ASLEEP_SIGNAL and set AWAKE_SIGNAL
                    // on its peer.
                    let (clear_mask, set_mask) = (ASLEEP_SIGNAL, AWAKE_SIGNAL);
                    watcher.signal_peer(clear_mask, set_mask)?;

                    suspend_context.wake_watchers.lock().push(watcher);
                }
                if let Err(e) = responder.send() {
                    warn!("error registering power watcher: {e}");
                }
            }
            fstarnixrunner::ManagerRequest::AddWakeSource { payload, .. } => {
                let fstarnixrunner::ManagerAddWakeSourceRequest {
                    container_job: Some(_container_job),
                    name: Some(name),
                    handle: Some(handle),
                    signals: Some(signals),
                    ..
                } = payload
                else {
                    continue;
                };
                // Track the handle and its signal mask as a wake source, keyed by the handle's
                // koid.
                suspend_context.wake_sources.lock().insert(
                    handle.get_koid().unwrap(),
                    WakeSource::from_handle(
                        handle,
                        name.clone(),
                        zx::Signals::from_bits_truncate(signals),
                    ),
                );
            }
            fstarnixrunner::ManagerRequest::RemoveWakeSource { payload, .. } => {
                let fstarnixrunner::ManagerRemoveWakeSourceRequest {
                    container_job: Some(_container_job),
                    handle: Some(handle),
                    ..
                } = payload
                else {
                    continue;
                };

                suspend_context.wake_sources.lock().remove(&handle.get_koid().unwrap());
            }
            fstarnixrunner::ManagerRequest::CreatePager { payload, .. } => {
                // Pagers are served on their own thread, each with its own local executor.
                std::thread::spawn(|| {
                    fasync::LocalExecutor::default().run_singlethreaded(run_pager(payload));
                });
            }
            _ => {}
        }
    }
    Ok(())
}