// gpt_component/service.rs
1// Copyright 2024 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use crate::config::Config;
6use crate::gpt::GptManager;
7use anyhow::{Context as _, Error};
8use block_client::RemoteBlockClient;
9use fidl::endpoints::{DiscoverableProtocolMarker as _, RequestStream as _, ServiceMarker as _};
10use fidl_fuchsia_fs as ffs;
11use fidl_fuchsia_fs_startup as fstartup;
12use fidl_fuchsia_io as fio;
13use fidl_fuchsia_process_lifecycle as flifecycle;
14use fidl_fuchsia_storage_block as fblock;
15use fidl_fuchsia_storage_partitions as fpartitions;
16use fuchsia_async as fasync;
17use futures::lock::Mutex as AsyncMutex;
18use futures::stream::TryStreamExt as _;
19use std::sync::Arc;
20use vfs::directory::helper::DirectlyMutable as _;
21use vfs::execution_scope::ExecutionScope;
22
/// Component-level service for managing a single GPT-formatted block device: serves the
/// component's outgoing directory and dispatches the Startup/PartitionsManager/PartitionsAdmin/
/// Admin protocols to the current [`State`].
pub struct GptService {
    // Current lifecycle state.  An async mutex because state transitions (Start, reformat,
    // shutdown) must hold the lock across await points.
    state: AsyncMutex<State>,

    // The execution scope of the pseudo filesystem.
    scope: ExecutionScope,

    // The root of the pseudo filesystem for the component.
    export_dir: Arc<vfs::directory::immutable::Simple>,

    // A directory where partitions are published.
    partitions_dir: Arc<vfs::directory::immutable::Simple>,
}
35
/// Lifecycle state of the service.  Starts (and, after shutdown, ends) as `Stopped`.
#[derive(Default)]
enum State {
    /// No block device is bound yet, or the service has shut down.
    #[default]
    Stopped,
    /// The GPT is malformed and needs to be reformatted with ResetPartitionTables before it can be
    /// used.  The component will publish an empty partitions directory.
    NeedsFormatting(Config, fblock::BlockProxy),
    /// A well-formed GPT was found and is being actively managed.
    Running(Arc<GptManager>),
}
45
46impl State {
47    fn is_stopped(&self) -> bool {
48        if let Self::Stopped = self { true } else { false }
49    }
50}
51
52impl GptService {
53    pub fn new() -> Arc<Self> {
54        let export_dir = vfs::directory::immutable::simple();
55        let partitions_dir = vfs::directory::immutable::simple();
56        Arc::new(Self {
57            state: Default::default(),
58            scope: ExecutionScope::new(),
59            export_dir,
60            partitions_dir,
61        })
62    }
63
64    pub async fn run(
65        self: Arc<Self>,
66        outgoing_dir: zx::Channel,
67        lifecycle_channel: Option<zx::Channel>,
68    ) -> Result<(), Error> {
69        let svc_dir = vfs::directory::immutable::simple();
70        self.export_dir.add_entry("svc", svc_dir.clone()).expect("Unable to create svc dir");
71
72        svc_dir
73            .add_entry(
74                fpartitions::PartitionServiceMarker::SERVICE_NAME,
75                self.partitions_dir.clone(),
76            )
77            .unwrap();
78        let weak = Arc::downgrade(&self);
79        svc_dir
80            .add_entry(
81                fstartup::StartupMarker::PROTOCOL_NAME,
82                vfs::service::host(move |requests| {
83                    let weak = weak.clone();
84                    async move {
85                        if let Some(me) = weak.upgrade() {
86                            let _ = me.handle_start_requests(requests).await;
87                        }
88                    }
89                }),
90            )
91            .unwrap();
92        let weak = Arc::downgrade(&self);
93        svc_dir
94            .add_entry(
95                fpartitions::PartitionsAdminMarker::PROTOCOL_NAME,
96                vfs::service::host(move |requests| {
97                    let weak = weak.clone();
98                    async move {
99                        if let Some(me) = weak.upgrade() {
100                            let _ = me.handle_partitions_admin_requests(requests).await;
101                        }
102                    }
103                }),
104            )
105            .unwrap();
106        let weak = Arc::downgrade(&self);
107        svc_dir
108            .add_entry(
109                fpartitions::PartitionsManagerMarker::PROTOCOL_NAME,
110                vfs::service::host(move |requests| {
111                    let weak = weak.clone();
112                    async move {
113                        if let Some(me) = weak.upgrade() {
114                            let _ = me.handle_partitions_manager_requests(requests).await;
115                        }
116                    }
117                }),
118            )
119            .unwrap();
120        let weak = Arc::downgrade(&self);
121        svc_dir
122            .add_entry(
123                ffs::AdminMarker::PROTOCOL_NAME,
124                vfs::service::host(move |requests| {
125                    let weak = weak.clone();
126                    async move {
127                        if let Some(me) = weak.upgrade() {
128                            let _ = me.handle_admin_requests(requests).await;
129                        }
130                    }
131                }),
132            )
133            .unwrap();
134
135        vfs::directory::serve_on(
136            self.export_dir.clone(),
137            fio::PERM_READABLE | fio::PERM_WRITABLE | fio::PERM_EXECUTABLE,
138            self.scope.clone(),
139            fidl::endpoints::ServerEnd::new(outgoing_dir),
140        );
141
142        if let Some(channel) = lifecycle_channel {
143            let me = self.clone();
144            self.scope.spawn(async move {
145                if let Err(e) = me.handle_lifecycle_requests(channel).await {
146                    log::warn!(error:? = e; "handle_lifecycle_requests");
147                }
148            });
149        }
150
151        self.scope.wait().await;
152
153        Ok(())
154    }
155
156    async fn handle_start_requests(
157        self: Arc<Self>,
158        mut stream: fstartup::StartupRequestStream,
159    ) -> Result<(), Error> {
160        while let Some(request) = stream.try_next().await.context("Reading request")? {
161            log::debug!(request:?; "");
162            match request {
163                fstartup::StartupRequest::Start { device, options, responder } => {
164                    responder
165                        .send(
166                            self.start(device.into_proxy(), options.into())
167                                .await
168                                .map_err(|status| status.into_raw()),
169                        )
170                        .unwrap_or_else(|e| log::error!(e:?; "Failed to send Start response"));
171                }
172                fstartup::StartupRequest::Format { responder, .. } => {
173                    responder
174                        .send(Err(zx::Status::NOT_SUPPORTED.into_raw()))
175                        .unwrap_or_else(|e| log::error!(e:?; "Failed to send Check response"));
176                }
177                fstartup::StartupRequest::Check { responder, .. } => {
178                    responder
179                        .send(Err(zx::Status::NOT_SUPPORTED.into_raw()))
180                        .unwrap_or_else(|e| log::error!(e:?; "Failed to send Check response"));
181                }
182            }
183        }
184        Ok(())
185    }
186
    /// Binds `device` and attempts to load its GPT.  On success the service transitions to
    /// `Running`; if the GPT is malformed it transitions to `NeedsFormatting` and *still returns
    /// OK* (see the TODO below for why).  Fails with `ALREADY_BOUND` if a device is bound.
    async fn start(
        self: &Arc<Self>,
        device: fblock::BlockProxy,
        config: Config,
    ) -> Result<(), zx::Status> {
        log::info!(config:?; "GPT starting");
        // Hold the state lock for the whole transition so concurrent Start calls serialize.
        let mut state = self.state.lock().await;
        if !state.is_stopped() {
            log::warn!("Device already bound");
            return Err(zx::Status::ALREADY_BOUND);
        }

        // TODO(https://fxbug.dev/339491886): It would be better if `start` failed on a malformed
        // device, rather than hiding this state from fshost.  However, fs_management isn't well set
        // up to deal with this, because it ties the outgoing directory to a successful return from
        // `start` (see `ServingMultiVolumeFilesystem`), and fshost resolves the queued requests for
        // the filesystem only after `start` is successful.  We should refactor fs_management and
        // fshost to better support this case, which might require changing how queueing works so
        // there's more flexibility to either resolve queueing when the component starts up (what we
        // need here), or when `Start` is successful (what a filesystem like Fxfs needs).
        *state = match GptManager::new_with_config(
            device.clone(),
            self.partitions_dir.clone(),
            config.clone(),
        )
        .await
        {
            Ok(runner) => State::Running(runner),
            Err(err) => {
                // This is a warning because, as described above, we can't return an error from
                // here, so there are normal flows that can print this error message that don't
                // indicate a bug or incorrect behavior.
                log::warn!(err:?; "Failed to load GPT.  Reformatting may be required.");
                State::NeedsFormatting(config, device)
            }
        };
        Ok(())
    }
225
226    async fn handle_partitions_manager_requests(
227        self: Arc<Self>,
228        mut stream: fpartitions::PartitionsManagerRequestStream,
229    ) -> Result<(), Error> {
230        while let Some(request) = stream.try_next().await.context("Reading request")? {
231            log::debug!(request:?; "");
232            match request {
233                fpartitions::PartitionsManagerRequest::GetBlockInfo { responder } => {
234                    responder
235                        .send(self.get_block_info().await.map_err(|status| status.into_raw()))
236                        .unwrap_or_else(
237                            |e| log::error!(e:?; "Failed to send GetBlockInfo response"),
238                        );
239                }
240                fpartitions::PartitionsManagerRequest::CreateTransaction { responder } => {
241                    responder
242                        .send(self.create_transaction().await.map_err(|status| status.into_raw()))
243                        .unwrap_or_else(
244                            |e| log::error!(e:?; "Failed to send CreateTransaction response"),
245                        );
246                }
247                fpartitions::PartitionsManagerRequest::CommitTransaction {
248                    transaction,
249                    responder,
250                } => {
251                    responder
252                        .send(
253                            self.commit_transaction(transaction)
254                                .await
255                                .map_err(|status| status.into_raw()),
256                        )
257                        .unwrap_or_else(
258                            |e| log::error!(e:?; "Failed to send CommitTransaction response"),
259                        );
260                }
261                fpartitions::PartitionsManagerRequest::AddPartition { payload, responder } => {
262                    responder
263                        .send(self.add_partition(payload).await.map_err(|status| status.into_raw()))
264                        .unwrap_or_else(
265                            |e| log::error!(e:?; "Failed to send AddPartition response"),
266                        );
267                }
268            }
269        }
270        Ok(())
271    }
272
273    async fn get_block_info(&self) -> Result<(u64, u32), zx::Status> {
274        let state = self.state.lock().await;
275        match &*state {
276            State::Stopped => return Err(zx::Status::BAD_STATE),
277            State::NeedsFormatting(_, block) => {
278                let info = block
279                    .get_info()
280                    .await
281                    .map_err(|err| {
282                        log::error!(err:?; "get_block_info: failed to query block info");
283                        zx::Status::IO
284                    })?
285                    .map_err(zx::Status::from_raw)?;
286                Ok((info.block_count, info.block_size))
287            }
288            State::Running(gpt) => Ok((gpt.block_count(), gpt.block_size())),
289        }
290    }
291
292    async fn create_transaction(&self) -> Result<zx::EventPair, zx::Status> {
293        let gpt_manager = self.gpt_manager().await?;
294        gpt_manager.create_transaction().await
295    }
296
297    async fn commit_transaction(&self, transaction: zx::EventPair) -> Result<(), zx::Status> {
298        let gpt_manager = self.gpt_manager().await?;
299        gpt_manager.commit_transaction(transaction).await
300    }
301
302    async fn add_partition(
303        &self,
304        request: fpartitions::PartitionsManagerAddPartitionRequest,
305    ) -> Result<(), zx::Status> {
306        let gpt_manager = self.gpt_manager().await?;
307        gpt_manager.add_partition(request).await
308    }
309
310    async fn handle_partitions_admin_requests(
311        self: Arc<Self>,
312        mut stream: fpartitions::PartitionsAdminRequestStream,
313    ) -> Result<(), Error> {
314        while let Some(request) = stream.try_next().await.context("Reading request")? {
315            log::debug!(request:?; "");
316            match request {
317                fpartitions::PartitionsAdminRequest::ResetPartitionTable {
318                    partitions,
319                    responder,
320                } => {
321                    responder
322                        .send(
323                            self.reset_partition_table(partitions)
324                                .await
325                                .map_err(|status| status.into_raw()),
326                        )
327                        .unwrap_or_else(|e| log::error!(e:?; "Failed to send Start response"));
328                }
329            }
330        }
331        Ok(())
332    }
333
334    async fn reset_partition_table(
335        &self,
336        partitions: Vec<fpartitions::PartitionInfo>,
337    ) -> Result<(), zx::Status> {
338        fn convert_partition_info(info: fpartitions::PartitionInfo) -> gpt::PartitionInfo {
339            gpt::PartitionInfo {
340                label: info.name,
341                type_guid: gpt::Guid::from_bytes(info.type_guid.value),
342                instance_guid: gpt::Guid::from_bytes(info.instance_guid.value),
343                start_block: info.start_block,
344                num_blocks: info.num_blocks,
345                flags: info.flags,
346            }
347        }
348        let partitions = partitions.into_iter().map(convert_partition_info).collect::<Vec<_>>();
349
350        let mut state = self.state.lock().await;
351        match &mut *state {
352            State::Stopped => return Err(zx::Status::BAD_STATE),
353            State::NeedsFormatting(config, block) => {
354                log::info!("reset_partition_table: Reformatting GPT.");
355                let client = Arc::new(RemoteBlockClient::new(block.clone()).await?);
356
357                log::info!("reset_partition_table: Reformatting GPT...");
358                gpt::Gpt::format(client, partitions).await.map_err(|err| {
359                    log::error!(err:?; "reset_partition_table: failed to init GPT");
360                    zx::Status::IO
361                })?;
362                *state = State::Running(
363                    GptManager::new_with_config(
364                        block.clone(),
365                        self.partitions_dir.clone(),
366                        config.clone(),
367                    )
368                    .await
369                    .map_err(|err| {
370                        log::error!(err:?; "reset_partition_table: failed to re-launch GPT");
371                        zx::Status::BAD_STATE
372                    })?,
373                );
374            }
375            State::Running(gpt) => {
376                log::info!("reset_partition_table: Updating GPT.");
377                gpt.reset_partition_table(partitions).await?;
378            }
379        }
380        Ok(())
381    }
382
383    async fn handle_admin_requests(
384        &self,
385        mut stream: ffs::AdminRequestStream,
386    ) -> Result<(), Error> {
387        if let Some(request) = stream.try_next().await.context("Reading request")? {
388            match request {
389                ffs::AdminRequest::Shutdown { responder } => {
390                    log::info!("Received Admin::Shutdown request");
391                    self.shutdown().await;
392                    responder
393                        .send()
394                        .unwrap_or_else(|e| log::error!(e:?; "Failed to send shutdown response"));
395                    log::info!("Admin shutdown complete");
396                }
397            }
398        }
399        Ok(())
400    }
401
402    async fn handle_lifecycle_requests(&self, lifecycle_channel: zx::Channel) -> Result<(), Error> {
403        let mut stream = flifecycle::LifecycleRequestStream::from_channel(
404            fasync::Channel::from_channel(lifecycle_channel),
405        );
406        match stream.try_next().await.context("Reading request")? {
407            Some(flifecycle::LifecycleRequest::Stop { .. }) => {
408                log::info!("Received Lifecycle::Stop request");
409                self.shutdown().await;
410                log::info!("Lifecycle shutdown complete");
411            }
412            None => {}
413        }
414        Ok(())
415    }
416
417    async fn gpt_manager(&self) -> Result<Arc<GptManager>, zx::Status> {
418        match &*self.state.lock().await {
419            State::Stopped | State::NeedsFormatting(_, _) => Err(zx::Status::BAD_STATE),
420            State::Running(gpt) => Ok(gpt.clone()),
421        }
422    }
423
424    async fn shutdown(&self) {
425        let mut state = self.state.lock().await;
426        if let State::Running(gpt) = std::mem::take(&mut *state) {
427            gpt.shutdown().await;
428        }
429        self.scope.shutdown();
430    }
431}
432
433#[cfg(test)]
434mod tests {
435    use super::GptService;
436    use block_client::RemoteBlockClient;
437    use fidl::endpoints::Proxy as _;
438    use fidl_fuchsia_fs as ffs;
439    use fidl_fuchsia_fs_startup as fstartup;
440    use fidl_fuchsia_io as fio;
441    use fidl_fuchsia_process_lifecycle::LifecycleMarker;
442    use fidl_fuchsia_storage_block as fblock;
443    use fidl_fuchsia_storage_partitions as fpartitions;
444    use fuchsia_async as fasync;
445    use fuchsia_component::client::connect_to_protocol_at_dir_svc;
446    use futures::FutureExt as _;
447    use gpt::{Gpt, Guid, PartitionInfo};
448    use std::sync::Arc;
449    use vmo_backed_block_server::{VmoBackedServer, VmoBackedServerTestingExt as _};
450
451    async fn setup_server(
452        block_size: u32,
453        block_count: u64,
454        partitions: Vec<PartitionInfo>,
455    ) -> Arc<VmoBackedServer> {
456        let vmo = zx::Vmo::create(block_size as u64 * block_count).unwrap();
457        let server = Arc::new(VmoBackedServer::from_vmo(512, vmo));
458        {
459            let (block_client, block_server) =
460                fidl::endpoints::create_proxy::<fblock::BlockMarker>();
461            let volume_stream = fidl::endpoints::ServerEnd::<fblock::BlockMarker>::from(
462                block_server.into_channel(),
463            )
464            .into_stream();
465            let server_clone = server.clone();
466            let _task = fasync::Task::spawn(async move { server_clone.serve(volume_stream).await });
467            let client = Arc::new(RemoteBlockClient::new(block_client).await.unwrap());
468            Gpt::format(client, partitions).await.unwrap();
469        }
470        server
471    }
472
    /// End-to-end lifecycle test: serve the outgoing directory, `Start` against a GPT-formatted
    /// block device, then stop via `fuchsia.process.lifecycle/Lifecycle` and observe the
    /// lifecycle channel closing once shutdown completes.
    #[fuchsia::test]
    async fn lifecycle() {
        let (outgoing_dir, outgoing_dir_server) =
            fidl::endpoints::create_proxy::<fio::DirectoryMarker>();
        let (lifecycle_client, lifecycle_server) =
            fidl::endpoints::create_proxy::<LifecycleMarker>();
        let (block_client, block_server) =
            fidl::endpoints::create_endpoints::<fblock::BlockMarker>();
        let volume_stream =
            fidl::endpoints::ServerEnd::<fblock::BlockMarker>::from(block_server.into_channel())
                .into_stream();

        // Three concurrent roles: the FIDL client, the GptService under test, and the fake
        // block device backing it.
        futures::join!(
            async {
                // Client
                let client =
                    connect_to_protocol_at_dir_svc::<fstartup::StartupMarker>(&outgoing_dir)
                        .unwrap();
                client
                    .start(block_client, &fstartup::StartOptions::default())
                    .await
                    .expect("FIDL error")
                    .expect("Start failed");
                lifecycle_client.stop().expect("Stop failed");
                // The server signals completion by closing the lifecycle channel.
                fasync::OnSignals::new(
                    &lifecycle_client.into_channel().expect("into_channel failed"),
                    zx::Signals::CHANNEL_PEER_CLOSED,
                )
                .await
                .expect("OnSignals failed");
            },
            async {
                // Server
                let service = GptService::new();
                service
                    .run(outgoing_dir_server.into_channel(), Some(lifecycle_server.into_channel()))
                    .await
                    .expect("Run failed");
            },
            async {
                // Block device
                let server = setup_server(
                    512,
                    8,
                    vec![PartitionInfo {
                        label: "part".to_string(),
                        type_guid: Guid::from_bytes([0xabu8; 16]),
                        instance_guid: Guid::from_bytes([0xcdu8; 16]),
                        start_block: 4,
                        num_blocks: 1,
                        flags: 0,
                    }],
                )
                .await;
                let _ = server.serve(volume_stream).await;
            }
            .fuse(),
        );
    }
532
    /// Tests shutdown via the `fuchsia.fs/Admin.Shutdown` protocol (no lifecycle channel):
    /// after Shutdown resolves, the admin channel should close as the execution scope is
    /// torn down.
    #[fuchsia::test]
    async fn admin_shutdown() {
        let (outgoing_dir, outgoing_dir_server) =
            fidl::endpoints::create_proxy::<fio::DirectoryMarker>();
        let (block_client, block_server) =
            fidl::endpoints::create_endpoints::<fblock::BlockMarker>();
        let volume_stream =
            fidl::endpoints::ServerEnd::<fblock::BlockMarker>::from(block_server.into_channel())
                .into_stream();

        futures::join!(
            async {
                // Client
                let client =
                    connect_to_protocol_at_dir_svc::<fstartup::StartupMarker>(&outgoing_dir)
                        .unwrap();
                let admin_client =
                    connect_to_protocol_at_dir_svc::<ffs::AdminMarker>(&outgoing_dir).unwrap();
                client
                    .start(block_client, &fstartup::StartOptions::default())
                    .await
                    .expect("FIDL error")
                    .expect("Start failed");
                admin_client.shutdown().await.expect("admin shutdown failed");
                // Scope teardown closes the admin connection.
                fasync::OnSignals::new(
                    &admin_client.into_channel().expect("into_channel failed"),
                    zx::Signals::CHANNEL_PEER_CLOSED,
                )
                .await
                .expect("OnSignals failed");
            },
            async {
                // Server
                let service = GptService::new();
                service.run(outgoing_dir_server.into_channel(), None).await.expect("Run failed");
            },
            async {
                // Block device
                let server = setup_server(
                    512,
                    8,
                    vec![PartitionInfo {
                        label: "part".to_string(),
                        type_guid: Guid::from_bytes([0xabu8; 16]),
                        instance_guid: Guid::from_bytes([0xcdu8; 16]),
                        start_block: 4,
                        num_blocks: 1,
                        flags: 0,
                    }],
                )
                .await;
                let _ = server.serve(volume_stream).await;
            }
            .fuse(),
        );
    }
589
    /// Exercises the PartitionsManager transaction API: only one transaction may exist at a
    /// time; a new one can be created after the previous is either committed or dropped.
    #[fuchsia::test]
    async fn transaction_lifecycle() {
        let (outgoing_dir, outgoing_dir_server) =
            fidl::endpoints::create_proxy::<fio::DirectoryMarker>();
        let (lifecycle_client, lifecycle_server) =
            fidl::endpoints::create_proxy::<LifecycleMarker>();
        let (block_client, block_server) =
            fidl::endpoints::create_endpoints::<fblock::BlockMarker>();
        let volume_stream =
            fidl::endpoints::ServerEnd::<fblock::BlockMarker>::from(block_server.into_channel())
                .into_stream();

        futures::join!(
            async {
                // Client
                connect_to_protocol_at_dir_svc::<fstartup::StartupMarker>(&outgoing_dir)
                    .unwrap()
                    .start(block_client, &fstartup::StartOptions::default())
                    .await
                    .expect("FIDL error")
                    .expect("Start failed");

                let pm_client = connect_to_protocol_at_dir_svc::<
                    fpartitions::PartitionsManagerMarker,
                >(&outgoing_dir)
                .unwrap();
                let transaction = pm_client
                    .create_transaction()
                    .await
                    .expect("FIDL error")
                    .expect("create_transaction failed");

                // A second concurrent transaction must be rejected.
                pm_client
                    .create_transaction()
                    .await
                    .expect("FIDL error")
                    .expect_err("create_transaction should fail while other txn exists");

                pm_client
                    .commit_transaction(transaction)
                    .await
                    .expect("FIDL error")
                    .expect("commit_transaction failed");

                // Committing released the slot; dropping this new transaction should too.
                {
                    let _transaction = pm_client
                        .create_transaction()
                        .await
                        .expect("FIDL error")
                        .expect("create_transaction should succeed after committing txn");
                }

                pm_client
                    .create_transaction()
                    .await
                    .expect("FIDL error")
                    .expect("create_transaction should succeed after dropping txn");

                lifecycle_client.stop().expect("Stop failed");
                fasync::OnSignals::new(
                    &lifecycle_client.into_channel().expect("into_channel failed"),
                    zx::Signals::CHANNEL_PEER_CLOSED,
                )
                .await
                .expect("OnSignals failed");
            },
            async {
                // Server
                let service = GptService::new();
                service
                    .run(outgoing_dir_server.into_channel(), Some(lifecycle_server.into_channel()))
                    .await
                    .expect("Run failed");
            },
            async {
                // Block device
                let server = setup_server(
                    512,
                    16,
                    vec![PartitionInfo {
                        label: "part".to_string(),
                        type_guid: Guid::from_bytes([0xabu8; 16]),
                        instance_guid: Guid::from_bytes([0xcdu8; 16]),
                        start_block: 4,
                        num_blocks: 1,
                        flags: 0,
                    }],
                )
                .await;
                let _ = server.serve(volume_stream).await;
            }
            .fuse(),
        );
    }
684}