gpt_component/
gpt.rs

// Copyright 2024 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use crate::config::Config;
use crate::partition::PartitionBackend;
use crate::partitions_directory::PartitionsDirectory;
use anyhow::{Context as _, Error, anyhow};
use block_client::{
    BlockClient as _, BufferSlice, MutableBufferSlice, ReadOptions, RemoteBlockClient, VmoId,
    WriteOptions,
};
use block_server::BlockServer;
use block_server::async_interface::SessionManager;

use fidl::endpoints::ServerEnd;
use fuchsia_sync::Mutex;
use futures::stream::TryStreamExt as _;
use std::collections::BTreeMap;
use std::num::NonZero;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Weak};
use zx::AsHandleRef as _;
use {
    fidl_fuchsia_hardware_block as fblock, fidl_fuchsia_storage_partitions as fpartitions,
    fuchsia_async as fasync,
};

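/// Formats the directory entry name under which partition `index` is published, e.g. "part-003"
/// for index 3.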
fn partition_directory_entry_name(index: u32) -> String {
    format!("part-{:03}", index)
}

/// A single partition in a GPT device.
pub struct GptPartition {
    gpt: Weak<GptManager>,
    info: Mutex<gpt::PartitionInfo>,
    block_client: Arc<RemoteBlockClient>,
}

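// Unwraps an optional trace flow id into the raw id, using 0 (no flow) when absent.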
fn trace_id(trace_flow_id: Option<NonZero<u64>>) -> u64 {
    trace_flow_id.map(|v| v.get()).unwrap_or_default()
}

impl GptPartition {
    pub fn new(
        gpt: &Arc<GptManager>,
        block_client: Arc<RemoteBlockClient>,
        info: gpt::PartitionInfo,
    ) -> Arc<Self> {
        Arc::new(Self { gpt: Arc::downgrade(gpt), info: Mutex::new(info), block_client })
    }

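    /// Closes the connection to the underlying block device.  Failures are logged and otherwise
    /// ignored.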
    pub async fn terminate(&self) {
        if let Err(error) = self.block_client.close().await {
            log::warn!(error:?; "Failed to close block client");
        }
    }

    /// Replaces the partition info, returning its old value.
    pub fn update_info(&self, info: gpt::PartitionInfo) -> gpt::PartitionInfo {
        std::mem::replace(&mut *self.info.lock(), info)
    }

    pub fn block_size(&self) -> u32 {
        self.block_client.block_size()
    }

    pub fn block_count(&self) -> u64 {
        self.info.lock().num_blocks
    }

    pub async fn attach_vmo(&self, vmo: &zx::Vmo) -> Result<VmoId, zx::Status> {
        self.block_client.attach_vmo(vmo).await
    }

    pub async fn detach_vmo(&self, vmoid: VmoId) -> Result<(), zx::Status> {
        self.block_client.detach_vmo(vmoid).await
    }

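    /// Opens a session directly against the underlying block device, installing an offset map so
    /// that partition-relative block offsets are translated to absolute device offsets.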
    pub fn open_passthrough_session(&self, session: ServerEnd<fblock::SessionMarker>) {
        if let Some(gpt) = self.gpt.upgrade() {
            let mapping = {
                let info = self.info.lock();
                fblock::BlockOffsetMapping {
                    source_block_offset: 0,
                    target_block_offset: info.start_block,
                    length: info.num_blocks,
                }
            };
            if let Err(err) = gpt.block_proxy.open_session_with_offset_map(session, &mapping) {
                // Client errors normally come back on `session` but that was already consumed.
                // The client will get a PEER_CLOSED without an epitaph.
                log::warn!(err:?; "Failed to open passthrough session");
            }
        } else if let Err(err) = session.close_with_epitaph(zx::Status::BAD_STATE) {
            log::warn!(err:?; "Failed to send session epitaph");
        }
    }

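    /// Returns the device info reported to the block server, combining this partition's GPT
    /// metadata with the underlying device's flags and transfer limits.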
    pub fn get_info(&self) -> block_server::DeviceInfo {
        convert_partition_info(
            &*self.info.lock(),
            self.block_client.block_flags(),
            self.block_client.max_transfer_blocks(),
        )
    }

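    // The I/O methods below take partition-relative block offsets; `absolute_offset` translates
    // them into absolute device offsets (with bounds checks) before requests are issued.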
    pub async fn read(
        &self,
        device_block_offset: u64,
        block_count: u32,
        vmo_id: &VmoId,
        vmo_offset: u64, // *bytes* not blocks
        opts: ReadOptions,
        trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        let dev_offset = self
            .absolute_offset(device_block_offset, block_count)
            .map(|offset| offset * self.block_size() as u64)?;
        let buffer = MutableBufferSlice::new_with_vmo_id(
            vmo_id,
            vmo_offset,
            block_count as u64 * self.block_size() as u64,
        );
        self.block_client
            .read_at_with_opts_traced(buffer, dev_offset, opts, trace_id(trace_flow_id))
            .await
    }

    pub async fn write(
        &self,
        device_block_offset: u64,
        block_count: u32,
        vmo_id: &VmoId,
        vmo_offset: u64, // *bytes* not blocks
        opts: WriteOptions,
        trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        let dev_offset = self
            .absolute_offset(device_block_offset, block_count)
            .map(|offset| offset * self.block_size() as u64)?;
        let buffer = BufferSlice::new_with_vmo_id(
            vmo_id,
            vmo_offset,
            block_count as u64 * self.block_size() as u64,
        );
        self.block_client
            .write_at_with_opts_traced(buffer, dev_offset, opts, trace_id(trace_flow_id))
            .await
    }

    pub async fn flush(&self, trace_flow_id: Option<NonZero<u64>>) -> Result<(), zx::Status> {
        self.block_client.flush_traced(trace_id(trace_flow_id)).await
    }

    pub async fn trim(
        &self,
        device_block_offset: u64,
        block_count: u32,
        trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        let dev_offset = self
            .absolute_offset(device_block_offset, block_count)
            .map(|offset| offset * self.block_size() as u64)?;
        let len = block_count as u64 * self.block_size() as u64;
        self.block_client.trim_traced(dev_offset..dev_offset + len, trace_id(trace_flow_id)).await
    }

    // Converts a relative range specified by [offset, offset+len) into an absolute offset in the
    // GPT device, performing bounds checking within the partition.  Returns ZX_ERR_OUT_OF_RANGE
    // for an invalid offset/len.
    fn absolute_offset(&self, mut offset: u64, len: u32) -> Result<u64, zx::Status> {
        let info = self.info.lock();
        offset = offset.checked_add(info.start_block).ok_or(zx::Status::OUT_OF_RANGE)?;
        let end = offset.checked_add(len as u64).ok_or(zx::Status::OUT_OF_RANGE)?;
        if end > info.start_block + info.num_blocks {
            Err(zx::Status::OUT_OF_RANGE)
        } else {
            Ok(offset)
        }
    }
}

fn convert_partition_info(
    info: &gpt::PartitionInfo,
    device_flags: fblock::Flag,
    max_transfer_blocks: Option<NonZero<u32>>,
) -> block_server::DeviceInfo {
    block_server::DeviceInfo::Partition(block_server::PartitionInfo {
        device_flags,
        max_transfer_blocks,
        block_range: Some(info.start_block..info.start_block + info.num_blocks),
        type_guid: info.type_guid.to_bytes(),
        instance_guid: info.instance_guid.to_bytes(),
        name: info.label.clone(),
        flags: info.flags,
    })
}

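/// Returns true if `b` begins exactly where `a` ends, i.e. the two partitions are contiguous on
/// the device.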
fn can_merge(a: &gpt::PartitionInfo, b: &gpt::PartitionInfo) -> bool {
    a.start_block + a.num_blocks == b.start_block
}

struct PendingTransaction {
    transaction: gpt::Transaction,
    client_koid: zx::Koid,
    // A list of indexes of partitions that were added in the transaction.  When committing, all
    // newly created partitions are published.
    added_partitions: Vec<u32>,
    // A task that waits for the client end to be closed and clears the pending transaction.
    _signal_task: fasync::Task<()>,
}

struct Inner {
    gpt: gpt::Gpt,
    partitions: BTreeMap<u32, Arc<BlockServer<SessionManager<PartitionBackend>>>>,
    // We track these separately so that we do not update them during transaction commit.
    overlay_partitions: BTreeMap<u32, Arc<BlockServer<SessionManager<PartitionBackend>>>>,
    // Exposes all partitions for discovery by other components.  Should be kept in sync with
    // `partitions`.
    partitions_dir: PartitionsDirectory,
    pending_transaction: Option<PendingTransaction>,
}

impl Inner {
    /// Ensures that `transaction` matches our pending transaction.
    fn ensure_transaction_matches(&self, transaction: &zx::EventPair) -> Result<(), zx::Status> {
        if let Some(pending) = self.pending_transaction.as_ref() {
            if transaction.get_koid()? == pending.client_koid {
                Ok(())
            } else {
                Err(zx::Status::BAD_HANDLE)
            }
        } else {
            Err(zx::Status::BAD_STATE)
        }
    }

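    /// Creates a block server for the partition at `index` and publishes it in `partitions_dir`.
    /// A non-empty `overlay_indexes` marks the partition as an overlay over those GPT entries,
    /// which is tracked separately from regular partitions.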
    fn bind_partition(
        &mut self,
        parent: &Arc<GptManager>,
        index: u32,
        info: gpt::PartitionInfo,
        overlay_indexes: Vec<usize>,
    ) -> Result<(), Error> {
        log::trace!(
            "GPT part {index}{}: {info:?}",
            if !overlay_indexes.is_empty() { " (overlay)" } else { "" }
        );
        info.start_block
            .checked_add(info.num_blocks)
            .ok_or_else(|| anyhow!("Overflow in partition end"))?;
        let partition =
            PartitionBackend::new(GptPartition::new(parent, self.gpt.client().clone(), info));
        let block_server = Arc::new(BlockServer::new(parent.block_size, partition));
        if !overlay_indexes.is_empty() {
            self.partitions_dir.add_overlay(
                &partition_directory_entry_name(index),
                Arc::downgrade(&block_server),
                Arc::downgrade(parent),
                overlay_indexes,
            );
            self.overlay_partitions.insert(index, block_server);
        } else {
            self.partitions_dir.add_partition(
                &partition_directory_entry_name(index),
                Arc::downgrade(&block_server),
                Arc::downgrade(parent),
                index as usize,
            );
            self.partitions.insert(index, block_server);
        }
        Ok(())
    }

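    /// Binds `super_partition` and `userdata_partition` as a single overlay partition spanning
    /// both, taking its identity from `super_partition`.  The two must be contiguous.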
    fn bind_super_and_userdata_partition(
        &mut self,
        parent: &Arc<GptManager>,
        super_partition: (u32, gpt::PartitionInfo),
        userdata_partition: (u32, gpt::PartitionInfo),
    ) -> Result<(), Error> {
        let info = gpt::PartitionInfo {
            // TODO(https://fxbug.dev/443980711): This should come from configuration.
            label: "super_and_userdata".to_string(),
            type_guid: super_partition.1.type_guid.clone(),
            instance_guid: super_partition.1.instance_guid.clone(),
            start_block: super_partition.1.start_block,
            num_blocks: super_partition.1.num_blocks + userdata_partition.1.num_blocks,
            flags: super_partition.1.flags,
        };
        log::trace!(
            "GPT merged parts {:?} + {:?} -> {info:?}",
            super_partition.1,
            userdata_partition.1
        );
        self.bind_partition(
            parent,
            super_partition.0,
            info,
            vec![super_partition.0 as usize, userdata_partition.0 as usize],
        )
    }

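    /// Rebuilds the set of block servers from the current GPT contents, clearing any existing
    /// bindings first.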
    fn bind_all_partitions(&mut self, parent: &Arc<GptManager>) -> Result<(), Error> {
        self.partitions.clear();
        self.overlay_partitions.clear();
        self.partitions_dir.clear();

        let mut partitions = self.gpt.partitions().clone();
        if parent.config.merge_super_and_userdata {
            // Attempt to merge the first `super` and `userdata` we find.  The rest will be
            // treated as regular partitions.
            let mut take = |label: &str| {
                let index = partitions
                    .iter()
                    .find(|(_, info)| info.label == label)
                    .map(|(index, _)| *index)?;
                partitions.remove_entry(&index)
            };
            match (take("super"), take("userdata")) {
                (Some(super_part), Some(userdata_part)) => {
                    if can_merge(&super_part.1, &userdata_part.1) {
                        self.bind_super_and_userdata_partition(parent, super_part, userdata_part)?;
                    } else {
                        log::warn!("super/userdata cannot be merged");
                        self.bind_partition(parent, super_part.0, super_part.1, vec![])?;
                        self.bind_partition(parent, userdata_part.0, userdata_part.1, vec![])?;
                    }
                }
                (Some((index, info)), None) | (None, Some((index, info))) => {
                    log::warn!("Only one of super/userdata found; not merging");
                    self.bind_partition(parent, index, info, vec![])?;
                }
                (None, None) => {}
            }
        }
        for (index, info) in partitions {
            self.bind_partition(parent, index, info, vec![])?;
        }
        Ok(())
    }

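    /// Adds `info` to the pending transaction (which must exist), recording the index of the new
    /// partition so that it can be published when the transaction commits.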
    fn add_partition(&mut self, info: gpt::PartitionInfo) -> Result<usize, gpt::AddPartitionError> {
        let pending = self.pending_transaction.as_mut().unwrap();
        let idx = self.gpt.add_partition(&mut pending.transaction, info)?;
        pending.added_partitions.push(idx as u32);
        Ok(idx)
    }
}

/// Runs a GPT device.
pub struct GptManager {
    config: Config,
    block_proxy: fblock::BlockProxy,
    block_size: u32,
    block_count: u64,
    inner: futures::lock::Mutex<Inner>,
    shutdown: AtomicBool,
}

impl std::fmt::Debug for GptManager {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
        f.debug_struct("GptManager")
            .field("block_size", &self.block_size)
            .field("block_count", &self.block_count)
            .finish()
    }
}

impl GptManager {
    pub async fn new(
        block_proxy: fblock::BlockProxy,
        partitions_dir: Arc<vfs::directory::immutable::Simple>,
    ) -> Result<Arc<Self>, Error> {
        Self::new_with_config(block_proxy, partitions_dir, Config::default()).await
    }

    pub async fn new_with_config(
        block_proxy: fblock::BlockProxy,
        partitions_dir: Arc<vfs::directory::immutable::Simple>,
        config: Config,
    ) -> Result<Arc<Self>, Error> {
        log::info!("Binding to GPT");
        let client = Arc::new(RemoteBlockClient::new(block_proxy.clone()).await?);
        let block_size = client.block_size();
        let block_count = client.block_count();
        let gpt = gpt::Gpt::open(client).await.context("Failed to load GPT")?;

        let this = Arc::new(Self {
            config,
            block_proxy,
            block_size,
            block_count,
            inner: futures::lock::Mutex::new(Inner {
                gpt,
                partitions: BTreeMap::new(),
                overlay_partitions: BTreeMap::new(),
                partitions_dir: PartitionsDirectory::new(partitions_dir),
                pending_transaction: None,
            }),
            shutdown: AtomicBool::new(false),
        });
        this.inner.lock().await.bind_all_partitions(&this)?;
        log::info!("Starting all partitions OK!");
        Ok(this)
    }

    pub fn block_size(&self) -> u32 {
        self.block_size
    }

    pub fn block_count(&self) -> u64 {
        self.block_count
    }

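    /// Starts a new metadata transaction, returning the client end of an event pair that serves
    /// as the transaction token.  Only one transaction may be pending at a time; dropping the
    /// token aborts the transaction.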
    pub async fn create_transaction(self: &Arc<Self>) -> Result<zx::EventPair, zx::Status> {
        let mut inner = self.inner.lock().await;
        if inner.pending_transaction.is_some() {
            return Err(zx::Status::ALREADY_EXISTS);
        }
        let transaction = inner.gpt.create_transaction().unwrap();
        let (client_end, server_end) = zx::EventPair::create();
        let client_koid = client_end.get_koid()?;
        let signal_waiter = fasync::OnSignals::new(server_end, zx::Signals::EVENTPAIR_PEER_CLOSED);
        let this = self.clone();
        let task = fasync::Task::spawn(async move {
            let _ = signal_waiter.await;
            let mut inner = this.inner.lock().await;
            if inner.pending_transaction.as_ref().map_or(false, |t| t.client_koid == client_koid) {
                inner.pending_transaction = None;
            }
        });
        inner.pending_transaction = Some(PendingTransaction {
            transaction,
            client_koid,
            added_partitions: vec![],
            _signal_task: task,
        });
        Ok(client_end)
    }

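    /// Commits the pending transaction identified by `transaction`, writing the new partition
    /// table to disk, propagating updated metadata to bound partitions, and publishing any
    /// partitions added during the transaction.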
    pub async fn commit_transaction(
        self: &Arc<Self>,
        transaction: zx::EventPair,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(&transaction)?;
        let pending = std::mem::take(&mut inner.pending_transaction).unwrap();
        let partitions = pending.transaction.partitions.clone();
        if let Err(err) = inner.gpt.commit_transaction(pending.transaction).await {
            log::warn!(err:?; "Failed to commit transaction");
            return Err(zx::Status::IO);
        }
        // Everything after this point should be infallible.
        for (info, idx) in partitions
            .iter()
            .zip(0u32..)
            .filter(|(info, idx)| !info.is_nil() && !pending.added_partitions.contains(idx))
        {
            // Some physical partitions are not tracked in `inner.partitions` (e.g. when we use an
            // overlay partition to combine two physical partitions).  In this case, we still need
            // to propagate the info in the underlying transaction, but there's no need to update
            // the in-memory info.
            // Note that overlay partitions can't be changed by transactions anyway, so the info
            // we propagate should be exactly what it was when we created the transaction.
            if let Some(part) = inner.partitions.get(&idx) {
                part.session_manager().interface().update_info(info.clone());
            }
        }
        for idx in pending.added_partitions {
            if let Some(info) = inner.gpt.partitions().get(&idx).cloned() {
                if let Err(err) = inner.bind_partition(self, idx, info, vec![]) {
                    log::error!(err:?; "Failed to bind partition");
                }
            }
        }
        Ok(())
    }

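    /// Adds a partition to the pending transaction.  The partition is allocated and named now,
    /// but only becomes visible once the transaction is committed.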
    pub async fn add_partition(
        &self,
        request: fpartitions::PartitionsManagerAddPartitionRequest,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(
            request.transaction.as_ref().ok_or(zx::Status::BAD_HANDLE)?,
        )?;
        let info = gpt::PartitionInfo {
            label: request.name.ok_or(zx::Status::INVALID_ARGS)?,
            type_guid: request
                .type_guid
                .map(|value| gpt::Guid::from_bytes(value.value))
                .ok_or(zx::Status::INVALID_ARGS)?,
            instance_guid: request
                .instance_guid
                .map(|value| gpt::Guid::from_bytes(value.value))
                .unwrap_or_else(|| gpt::Guid::generate()),
            start_block: 0,
            num_blocks: request.num_blocks.ok_or(zx::Status::INVALID_ARGS)?,
            flags: request.flags.unwrap_or_default(),
        };
        let idx = inner.add_partition(info)?;
        let partition =
            inner.pending_transaction.as_ref().unwrap().transaction.partitions.get(idx).unwrap();
        log::info!(
            "Allocated partition {:?} at {:?}",
            partition.label,
            partition.start_block..partition.start_block + partition.num_blocks
        );
        Ok(())
    }

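    /// Serves the fuchsia.storage.partitions.Partition protocol for the partition at
    /// `gpt_index`.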
    pub async fn handle_partitions_requests(
        &self,
        gpt_index: usize,
        mut requests: fpartitions::PartitionRequestStream,
    ) -> Result<(), zx::Status> {
        while let Some(request) = requests.try_next().await.unwrap() {
            match request {
                fpartitions::PartitionRequest::UpdateMetadata { payload, responder } => {
                    responder
                        .send(
                            self.update_partition_metadata(gpt_index, payload)
                                .await
                                .map_err(|status| status.into_raw()),
                        )
                        .unwrap_or_else(
                            |err| log::error!(err:?; "Failed to send UpdateMetadata response"),
                        );
                }
            }
        }
        Ok(())
    }

    async fn update_partition_metadata(
        &self,
        gpt_index: usize,
        request: fpartitions::PartitionUpdateMetadataRequest,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(
            request.transaction.as_ref().ok_or(zx::Status::BAD_HANDLE)?,
        )?;

        let transaction = &mut inner.pending_transaction.as_mut().unwrap().transaction;
        let entry = transaction.partitions.get_mut(gpt_index).ok_or(zx::Status::BAD_STATE)?;
        if let Some(type_guid) = request.type_guid.as_ref().cloned() {
            entry.type_guid = gpt::Guid::from_bytes(type_guid.value);
        }
        if let Some(flags) = request.flags.as_ref() {
            entry.flags = *flags;
        }
        Ok(())
    }

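    /// Serves the fuchsia.storage.partitions.OverlayPartition protocol for an overlay partition
    /// composed of the GPT entries at `gpt_indexes`.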
    pub async fn handle_overlay_partitions_requests(
        &self,
        gpt_indexes: Vec<usize>,
        mut requests: fpartitions::OverlayPartitionRequestStream,
    ) -> Result<(), zx::Status> {
        while let Some(request) = requests.try_next().await.unwrap() {
            match request {
                fpartitions::OverlayPartitionRequest::GetPartitions { responder } => {
                    match self.get_overlay_partition_info(&gpt_indexes[..]).await {
                        Ok(partitions) => responder.send(Ok(&partitions[..])),
                        Err(status) => responder.send(Err(status.into_raw())),
                    }
                    .unwrap_or_else(
                        |err| log::error!(err:?; "Failed to send GetPartitions response"),
                    );
                }
            }
        }
        Ok(())
    }

    async fn get_overlay_partition_info(
        &self,
        gpt_indexes: &[usize],
    ) -> Result<Vec<fpartitions::PartitionInfo>, zx::Status> {
        fn convert_partition_info(info: &gpt::PartitionInfo) -> fpartitions::PartitionInfo {
            fpartitions::PartitionInfo {
                name: info.label.to_string(),
                type_guid: fidl_fuchsia_hardware_block_partition::Guid {
                    value: info.type_guid.to_bytes(),
                },
                instance_guid: fidl_fuchsia_hardware_block_partition::Guid {
                    value: info.instance_guid.to_bytes(),
                },
                start_block: info.start_block,
                num_blocks: info.num_blocks,
                flags: info.flags,
            }
        }

        let inner = self.inner.lock().await;
        let mut partitions = vec![];
        for index in gpt_indexes {
            let index = *index as u32;
            partitions.push(
                inner
                    .gpt
                    .partitions()
                    .get(&index)
                    .map(convert_partition_info)
                    .ok_or(zx::Status::BAD_STATE)?,
            );
        }
        Ok(partitions)
    }

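    /// Replaces the entire partition table with `partitions` and rebinds all block servers.
    /// Existing partition contents should be considered lost.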
    pub async fn reset_partition_table(
        self: &Arc<Self>,
        partitions: Vec<gpt::PartitionInfo>,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        if inner.pending_transaction.is_some() {
            return Err(zx::Status::BAD_STATE);
        }

        log::info!("Resetting gpt.  Expect data loss!!!");
        let mut transaction = inner.gpt.create_transaction().unwrap();
        transaction.partitions = partitions;
        inner.gpt.commit_transaction(transaction).await?;

        if let Err(err) = inner.bind_all_partitions(self) {
            log::error!(err:?; "Failed to rebind partitions");
            return Err(zx::Status::BAD_STATE);
        }
        log::info!("Rebinding partitions OK!");
        Ok(())
    }

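    /// Tears down all partitions and marks the manager as shut down.  This must be called before
    /// the manager is dropped.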
    pub async fn shutdown(self: Arc<Self>) {
        log::info!("Shutting down gpt");
        let mut inner = self.inner.lock().await;
        inner.partitions_dir.clear();
        inner.partitions.clear();
        inner.overlay_partitions.clear();
        self.shutdown.store(true, Ordering::Relaxed);
        log::info!("Shutting down gpt OK");
    }
}

impl Drop for GptManager {
    fn drop(&mut self) {
        assert!(self.shutdown.load(Ordering::Relaxed), "Did you forget to shutdown?");
    }
}

#[cfg(test)]
mod tests {
    use super::GptManager;
    use block_client::{
        BlockClient as _, BufferSlice, MutableBufferSlice, RemoteBlockClient, WriteFlags,
    };
    use block_server::{BlockInfo, DeviceInfo, WriteOptions};
    use fidl::HandleBased as _;
    use fuchsia_component::client::connect_to_named_protocol_at_dir_root;
    use gpt::{Gpt, Guid, PartitionInfo};
    use std::num::NonZero;
    use std::sync::Arc;
    use std::sync::atomic::{AtomicBool, Ordering};
    use vmo_backed_block_server::{
        InitialContents, VmoBackedServer, VmoBackedServerOptions, VmoBackedServerTestingExt as _,
    };
    use {
        fidl_fuchsia_hardware_block as fblock, fidl_fuchsia_hardware_block_volume as fvolume,
        fidl_fuchsia_io as fio, fidl_fuchsia_storage_partitions as fpartitions,
        fuchsia_async as fasync,
    };

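    // Creates a VMO-backed block server formatted with the given GPT partitions, along with an
    // empty directory in which GptManager will publish partition entries.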
    async fn setup(
        block_size: u32,
        block_count: u64,
        partitions: Vec<PartitionInfo>,
    ) -> (Arc<VmoBackedServer>, Arc<vfs::directory::immutable::Simple>) {
        setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(block_count),
                block_size,
                ..Default::default()
            },
            partitions,
        )
        .await
    }

    async fn setup_with_options(
        opts: VmoBackedServerOptions<'_>,
        partitions: Vec<PartitionInfo>,
    ) -> (Arc<VmoBackedServer>, Arc<vfs::directory::immutable::Simple>) {
        let server = Arc::new(opts.build().unwrap());
        {
            let (block_client, block_server) =
                fidl::endpoints::create_proxy::<fblock::BlockMarker>();
            let volume_stream = fidl::endpoints::ServerEnd::<fvolume::VolumeMarker>::from(
                block_server.into_channel(),
            )
            .into_stream();
            let server_clone = server.clone();
            let _task = fasync::Task::spawn(async move { server_clone.serve(volume_stream).await });
            let client = Arc::new(RemoteBlockClient::new(block_client).await.unwrap());
            Gpt::format(client, partitions).await.unwrap();
        }
        (server, vfs::directory::immutable::simple())
    }

    #[fuchsia::test]
    async fn load_unformatted_gpt() {
        let vmo = zx::Vmo::create(4096).unwrap();
        let server = Arc::new(VmoBackedServer::from_vmo(512, vmo));

        GptManager::new(server.connect(), vfs::directory::immutable::simple())
            .await
            .expect_err("load should fail");
    }

    #[fuchsia::test]
    async fn load_formatted_empty_gpt() {
        let (block_device, partitions_dir) = setup(512, 8, vec![]).await;

        let runner = GptManager::new(block_device.connect(), partitions_dir)
            .await
            .expect("load should succeed");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_one_partition() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").map(|_| ()).expect_err("Extra entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_two_partitions() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        partitions_dir.get_entry("part-002").map(|_| ()).expect_err("Extra entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn partition_io() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 2,
                flags: 0,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        assert_eq!(client.block_count(), 2);
        assert_eq!(client.block_size(), 512);

        let buf = vec![0xabu8; 512];
        client.write_at(BufferSlice::Memory(&buf[..]), 0).await.expect("write_at failed");
        client
            .write_at(BufferSlice::Memory(&buf[..]), 1024)
            .await
            .expect_err("write_at should fail when writing past partition end");
        let mut buf2 = vec![0u8; 512];
        client.read_at(MutableBufferSlice::Memory(&mut buf2[..]), 0).await.expect("read_at failed");
        assert_eq!(buf, buf2);
        client
            .read_at(MutableBufferSlice::Memory(&mut buf2[..]), 1024)
            .await
            .expect_err("read_at should fail when reading past partition end");
        client.trim(512..1024).await.expect("trim failed");
        client.trim(1..512).await.expect_err("trim with invalid range should fail");
        client.trim(1024..1536).await.expect_err("trim past end of partition should fail");
        runner.shutdown().await;

        // Ensure writes persisted to the partition.
        let mut buf = vec![0u8; 512];
        let client =
            RemoteBlockClient::new(block_device.connect::<fblock::BlockProxy>()).await.unwrap();
        client.read_at(MutableBufferSlice::Memory(&mut buf[..]), 2048).await.unwrap();
        assert_eq!(&buf[..], &[0xabu8; 512]);
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_invalid_primary_header() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        {
            let (client, stream) =
                fidl::endpoints::create_proxy_and_stream::<fvolume::VolumeMarker>();
            let server = block_device.clone();
            let _task = fasync::Task::spawn(async move { server.serve(stream).await });
            let client = RemoteBlockClient::new(client).await.unwrap();
            client.write_at(BufferSlice::Memory(&[0xffu8; 512]), 512).await.unwrap();
        }

        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_invalid_primary_partition_table() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        {
            let (client, stream) =
                fidl::endpoints::create_proxy_and_stream::<fvolume::VolumeMarker>();
            let server = block_device.clone();
            let _task = fasync::Task::spawn(async move { server.serve(stream).await });
            let client = RemoteBlockClient::new(client).await.unwrap();
            client.write_at(BufferSlice::Memory(&[0xffu8; 512]), 1024).await.unwrap();
        }

        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn force_access_passed_through() {
        const BLOCK_SIZE: u32 = 512;
        const BLOCK_COUNT: u64 = 1024;

        struct Observer(Arc<AtomicBool>);

        impl vmo_backed_block_server::Observer for Observer {
            fn write(
                &self,
                _device_block_offset: u64,
                _block_count: u32,
                _vmo: &Arc<zx::Vmo>,
                _vmo_offset: u64,
                opts: WriteOptions,
            ) -> vmo_backed_block_server::WriteAction {
                assert_eq!(
                    opts.flags.contains(WriteFlags::FORCE_ACCESS),
                    self.0.load(Ordering::Relaxed)
                );
                vmo_backed_block_server::WriteAction::Write
            }
        }

        let expect_force_access = Arc::new(AtomicBool::new(false));
        let (server, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(BLOCK_COUNT),
                block_size: BLOCK_SIZE,
                observer: Some(Box::new(Observer(expect_force_access.clone()))),
                info: DeviceInfo::Block(BlockInfo {
                    device_flags: fblock::Flag::FUA_SUPPORT,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: "foo".to_string(),
                type_guid: Guid::from_bytes([1; 16]),
                instance_guid: Guid::from_bytes([2; 16]),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let manager = GptManager::new(server.connect(), partitions_dir.clone()).await.unwrap();

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        let buffer = vec![0; BLOCK_SIZE as usize];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();

        expect_force_access.store(true, Ordering::Relaxed);

        client
            .write_at_with_opts(
                BufferSlice::Memory(&buffer),
                0,
                WriteOptions { flags: WriteFlags::FORCE_ACCESS, ..Default::default() },
            )
            .await
            .unwrap();

        manager.shutdown().await;
    }

    #[fuchsia::test]
    async fn barrier_passed_through() {
        const BLOCK_SIZE: u32 = 512;
        const BLOCK_COUNT: u64 = 1024;

        struct Observer(Arc<AtomicBool>);

        impl vmo_backed_block_server::Observer for Observer {
            fn write(
                &self,
                _device_block_offset: u64,
                _block_count: u32,
                _vmo: &Arc<zx::Vmo>,
                _vmo_offset: u64,
                opts: WriteOptions,
            ) -> vmo_backed_block_server::WriteAction {
                assert_eq!(
                    opts.flags.contains(WriteFlags::PRE_BARRIER),
                    self.0.load(Ordering::Relaxed)
                );
                vmo_backed_block_server::WriteAction::Write
            }
        }

        let expect_barrier = Arc::new(AtomicBool::new(false));
        let (server, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(BLOCK_COUNT),
                block_size: BLOCK_SIZE,
                observer: Some(Box::new(Observer(expect_barrier.clone()))),
                info: DeviceInfo::Block(BlockInfo {
                    device_flags: fblock::Flag::BARRIER_SUPPORT,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: "foo".to_string(),
                type_guid: Guid::from_bytes([1; 16]),
                instance_guid: Guid::from_bytes([2; 16]),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let manager = GptManager::new(server.connect(), partitions_dir.clone()).await.unwrap();

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        let buffer = vec![0; BLOCK_SIZE as usize];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();

        expect_barrier.store(true, Ordering::Relaxed);
        client.barrier();
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();

        manager.shutdown().await;
    }

    #[fuchsia::test]
    async fn commit_transaction() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_NAME: &str = "part";
        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            16,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");

        let part_0_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_1_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::Path::validate_and_split("part-001").unwrap(),
            fio::PERM_READABLE,
        );
        let part_0_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_0_dir,
            "partition",
        )
        .expect("Failed to open Partition service");
        let part_1_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_1_dir,
            "partition",
        )
        .expect("Failed to open Partition service");

        let transaction = runner.create_transaction().await.expect("Failed to create transaction");
        part_0_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                type_guid: Some(fidl_fuchsia_hardware_block_partition::Guid {
                    value: [0xffu8; 16],
                }),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");
        part_1_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                flags: Some(1234),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");
        runner.commit_transaction(transaction).await.expect("Failed to commit transaction");

        // Ensure the changes have propagated to the correct partitions.
        let part_0_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_0_dir, "volume")
                .expect("Failed to open Volume service");
        let (status, guid) = part_0_block.get_type_guid().await.expect("FIDL error");
        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
        assert_eq!(guid.unwrap().value, [0xffu8; 16]);
        let part_1_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_1_dir, "volume")
                .expect("Failed to open Volume service");
        let metadata =
            part_1_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
        assert_eq!(metadata.flags, Some(1234));

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn commit_transaction_with_io_error() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_NAME: &str = "part";
        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
        const PART_2_NAME: &str = "part2";

        #[derive(Clone)]
        struct Observer(Arc<AtomicBool>);
        impl vmo_backed_block_server::Observer for Observer {
            fn write(
                &self,
                _device_block_offset: u64,
                _block_count: u32,
                _vmo: &Arc<zx::Vmo>,
                _vmo_offset: u64,
                _opts: WriteOptions,
            ) -> vmo_backed_block_server::WriteAction {
                if self.0.load(Ordering::Relaxed) {
                    vmo_backed_block_server::WriteAction::Fail
                } else {
                    vmo_backed_block_server::WriteAction::Write
                }
            }
        }
        let observer = Observer(Arc::new(AtomicBool::new(false)));
        let (block_device, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(16),
                block_size: 512,
                observer: Some(Box::new(observer.clone())),
                ..Default::default()
            },
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");

        let part_0_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_1_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::Path::validate_and_split("part-001").unwrap(),
            fio::PERM_READABLE,
        );
        let part_0_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_0_dir,
            "partition",
        )
        .expect("Failed to open Partition service");
        let part_1_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_1_dir,
            "partition",
        )
        .expect("Failed to open Partition service");

        let transaction = runner.create_transaction().await.expect("Failed to create transaction");
        part_0_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                type_guid: Some(fidl_fuchsia_hardware_block_partition::Guid {
                    value: [0xffu8; 16],
                }),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");
        part_1_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                flags: Some(1234),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");

        observer.0.store(true, Ordering::Relaxed); // Fail the next write
        runner.commit_transaction(transaction).await.expect_err("Commit transaction should fail");

        // Ensure the changes did not get applied.
        let part_0_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_0_dir, "volume")
                .expect("Failed to open Volume service");
        let (status, guid) = part_0_block.get_type_guid().await.expect("FIDL error");
        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
        assert_eq!(guid.unwrap().value, PART_TYPE_GUID);
        let part_1_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_1_dir, "volume")
                .expect("Failed to open Volume service");
        let metadata =
            part_1_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
        assert_eq!(metadata.flags, Some(0));

        runner.shutdown().await;
    }

1335    #[fuchsia::test]
1336    async fn reset_partition_tables() {
1337        // The test will reset the tables from ["part", "part2"] to
1338        // ["part3", <empty>, "part4", <125 empty entries>].
1339        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
1340        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
1341        const PART_1_NAME: &str = "part";
1342        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
1343        const PART_2_NAME: &str = "part2";
1344        const PART_3_NAME: &str = "part3";
1345        const PART_4_NAME: &str = "part4";
1346
1347        let (block_device, partitions_dir) = setup(
1348            512,
1349            1048576 / 512,
1350            vec![
1351                PartitionInfo {
1352                    label: PART_1_NAME.to_string(),
1353                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
1354                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
1355                    start_block: 4,
1356                    num_blocks: 1,
1357                    flags: 0,
1358                },
1359                PartitionInfo {
1360                    label: PART_2_NAME.to_string(),
1361                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
1362                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
1363                    start_block: 5,
1364                    num_blocks: 1,
1365                    flags: 0,
1366                },
1367            ],
1368        )
1369        .await;
1370        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
1371            .await
1372            .expect("load should succeed");
1373        let nil_entry = PartitionInfo {
1374            label: "".to_string(),
1375            type_guid: Guid::from_bytes([0u8; 16]),
1376            instance_guid: Guid::from_bytes([0u8; 16]),
1377            start_block: 0,
1378            num_blocks: 0,
1379            flags: 0,
1380        };
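        // A standard GPT reserves room for 128 entries; unused slots are all zeroes.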
        let mut new_partitions = vec![nil_entry; 128];
        new_partitions[0] = PartitionInfo {
            label: PART_3_NAME.to_string(),
            type_guid: Guid::from_bytes(PART_TYPE_GUID),
            instance_guid: Guid::from_bytes([1u8; 16]),
            start_block: 64,
            num_blocks: 2,
            flags: 0,
        };
        new_partitions[2] = PartitionInfo {
            label: PART_4_NAME.to_string(),
            type_guid: Guid::from_bytes(PART_TYPE_GUID),
            instance_guid: Guid::from_bytes([2u8; 16]),
            start_block: 66,
            num_blocks: 4,
            flags: 0,
        };
        runner.reset_partition_table(new_partitions).await.expect("reset_partition_table failed");
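        // Only non-empty slots surface as directory entries, named by their index in the table.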
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").map(|_| ()).expect_err("Extra entry found");
        partitions_dir.get_entry("part-002").expect("No entry found");

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let (status, name) = block.get_name().await.expect("FIDL error");
        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
        assert_eq!(name.unwrap(), PART_3_NAME);

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_too_many_partitions() {
        let (block_device, partitions_dir) = setup(512, 8, vec![]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let nil_entry = PartitionInfo {
            label: "".to_string(),
            type_guid: Guid::from_bytes([0u8; 16]),
            instance_guid: Guid::from_bytes([0u8; 16]),
            start_block: 0,
            num_blocks: 0,
            flags: 0,
        };
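        // Assuming the standard 128-byte entry size, each copy of a 128-entry table occupies
        // 32 blocks of 512 bytes, which cannot fit on an 8-block device.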
        let new_partitions = vec![nil_entry; 128];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_too_large_partitions() {
        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
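        // Partition "b" would span blocks [6, 206) on a device that only has 64 blocks.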
        let new_partitions = vec![
            PartitionInfo {
                label: "a".to_string(),
                type_guid: Guid::from_bytes([1u8; 16]),
                instance_guid: Guid::from_bytes([1u8; 16]),
                start_block: 4,
                num_blocks: 2,
                flags: 0,
            },
            PartitionInfo {
                label: "b".to_string(),
                type_guid: Guid::from_bytes([2u8; 16]),
                instance_guid: Guid::from_bytes([2u8; 16]),
                start_block: 6,
                num_blocks: 200,
                flags: 0,
            },
        ];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_partition_overlaps_metadata() {
        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
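        // Block 0 holds the protective MBR and block 1 the primary GPT header, so a partition
        // starting at block 1 collides with the GPT metadata.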
        let new_partitions = vec![PartitionInfo {
            label: "a".to_string(),
            type_guid: Guid::from_bytes([1u8; 16]),
            instance_guid: Guid::from_bytes([1u8; 16]),
            start_block: 1,
            num_blocks: 2,
            flags: 0,
        }];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_partitions_overlap() {
        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
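        // "a" spans blocks [32, 34) and "b" spans [33, 34); they collide at block 33.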
        let new_partitions = vec![
            PartitionInfo {
                label: "a".to_string(),
                type_guid: Guid::from_bytes([1u8; 16]),
                instance_guid: Guid::from_bytes([1u8; 16]),
                start_block: 32,
                num_blocks: 2,
                flags: 0,
            },
            PartitionInfo {
                label: "b".to_string(),
                type_guid: Guid::from_bytes([2u8; 16]),
                instance_guid: Guid::from_bytes([2u8; 16]),
                start_block: 33,
                num_blocks: 1,
                flags: 0,
            },
        ];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn add_partition() {
        let (block_device, partitions_dir) = setup(512, 64, vec![PartitionInfo::nil(); 64]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");

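        // Adding a partition is also transactional: stage the request against a transaction,
        // then commit to make it visible.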
        let transaction = runner.create_transaction().await.expect("Create transaction failed");
        let request = fpartitions::PartitionsManagerAddPartitionRequest {
            transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
            name: Some("a".to_string()),
            type_guid: Some(fidl_fuchsia_hardware_block_partition::Guid { value: [1u8; 16] }),
            num_blocks: Some(2),
            ..Default::default()
        };
        runner.add_partition(request).await.expect("add_partition failed");
        runner.commit_transaction(transaction).await.expect("commit_transaction failed");
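        // The new partition should appear as part-000 and report the requested geometry.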
        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        assert_eq!(client.block_count(), 2);
        assert_eq!(client.block_size(), 512);

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn partition_info() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

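        // The partition should report the flags and transfer limits of the backing device,
        // plus the metadata recorded in its GPT entry.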
        let (block_device, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(16),
                block_size: 512,
                info: DeviceInfo::Block(BlockInfo {
                    max_transfer_blocks: NonZero::new(2),
                    device_flags: fblock::Flag::READONLY
                        | fblock::Flag::REMOVABLE
                        | fblock::Flag::ZSTD_DECOMPRESSION_SUPPORT,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 1,
                flags: 0xabcd,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_dir, "volume")
                .expect("Failed to open Volume service");
        let info = part_block.get_info().await.expect("FIDL error").expect("get_info failed");
        assert_eq!(info.block_count, 1);
        assert_eq!(info.block_size, 512);
        assert_eq!(
            info.flags,
            fblock::Flag::READONLY
                | fblock::Flag::REMOVABLE
                | fblock::Flag::ZSTD_DECOMPRESSION_SUPPORT
        );
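        // max_transfer_size is max_transfer_blocks (2) scaled by the 512-byte block size.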
        assert_eq!(info.max_transfer_size, 1024);

        let metadata =
            part_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
        assert_eq!(metadata.name, Some(PART_NAME.to_string()));
        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
        assert_eq!(metadata.instance_guid.unwrap().value, PART_INSTANCE_GUID);
        assert_eq!(metadata.start_block_offset, Some(4));
        assert_eq!(metadata.num_blocks, Some(1));
        assert_eq!(metadata.flags, Some(0xabcd));

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn nested_gpt() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

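        // Format a second GPT inside the outer GPT's partition and serve it with its own
        // GptManager. Writes through the inner partition must land at the absolute offset
        // implied by both layers of translation.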
        let vmo = zx::Vmo::create(64 * 512).unwrap();
        let vmo_clone = vmo.create_child(zx::VmoChildOptions::REFERENCE, 0, 0).unwrap();
        let (outer_block_device, outer_partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromVmo(vmo_clone),
                block_size: 512,
                info: DeviceInfo::Block(BlockInfo {
                    device_flags: fblock::Flag::READONLY | fblock::Flag::REMOVABLE,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 16,
                flags: 0xabcd,
            }],
        )
        .await;

        let outer_partitions_dir_clone = outer_partitions_dir.clone();
        let outer_runner =
            GptManager::new(outer_block_device.connect(), outer_partitions_dir_clone)
                .await
                .expect("load should succeed");

        let outer_part_dir = vfs::serve_directory(
            outer_partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&outer_part_dir, "volume")
                .expect("Failed to open Block service");

        let client = Arc::new(RemoteBlockClient::new(part_block.clone()).await.unwrap());
        let _ = gpt::Gpt::format(
            client,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 5,
                num_blocks: 1,
                flags: 0xabcd,
            }],
        )
        .await
        .unwrap();

        let partitions_dir = vfs::directory::immutable::simple();
        let partitions_dir_clone = partitions_dir.clone();
        let runner =
            GptManager::new(part_block, partitions_dir_clone).await.expect("load should succeed");
        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let inner_part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Block service");

        let client =
            RemoteBlockClient::new(inner_part_block).await.expect("Failed to create block client");
        assert_eq!(client.block_count(), 1);
        assert_eq!(client.block_size(), 512);

        let buffer = vec![0xaa; 512];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();
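        // The inner partition is a single 512-byte block, so a write at offset 512 is out of
        // range.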
        client
            .write_at(BufferSlice::Memory(&buffer), 512)
            .await
            .expect_err("Write past end should fail");
        client.flush().await.unwrap();

        runner.shutdown().await;
        outer_runner.shutdown().await;

        // Check that the write landed at absolute block 9: the outer partition starts at block 4
        // and the inner partition at block 5 within it (4 + 5 = 9).
        let data = vmo.read_to_vec::<u8>(9 * 512, 512).unwrap();
        assert_eq!(&data[..], &buffer[..]);
    }

    #[fuchsia::test]
    async fn offset_map_does_not_allow_partition_overwrite() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(16),
                block_size: 512,
                info: DeviceInfo::Block(BlockInfo {
                    device_flags: fblock::Flag::READONLY | fblock::Flag::REMOVABLE,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 2,
                flags: 0xabcd,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );

        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Block service");
        // Attempting to open a session with an offset map that extends past the end of the
        // partition should fail: mapping 2 blocks at target offset 1 reaches block 3 of a
        // 2-block partition.
        let (session, server_end) = fidl::endpoints::create_proxy::<fblock::SessionMarker>();
        part_block
            .open_session_with_offset_map(
                server_end,
                &fblock::BlockOffsetMapping {
                    source_block_offset: 0,
                    target_block_offset: 1,
                    length: 2,
                },
            )
            .expect("FIDL error");
        session.get_fifo().await.expect_err("Session should be closed");

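        // Likewise, a mapping whose length (3 blocks) exceeds the 2-block partition must be
        // rejected even at target offset 0.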
        let (session, server_end) = fidl::endpoints::create_proxy::<fblock::SessionMarker>();
        part_block
            .open_session_with_offset_map(
                server_end,
                &fblock::BlockOffsetMapping {
                    source_block_offset: 0,
                    target_block_offset: 0,
                    length: 3,
                },
            )
            .expect("FIDL error");
        session.get_fifo().await.expect_err("Session should be closed");

        runner.shutdown().await;
    }
}