gpt_component/
gpt.rs

// Copyright 2024 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use crate::config::Config;
use crate::partition::PartitionBackend;
use crate::partitions_directory::PartitionsDirectory;
use anyhow::{Context as _, Error, anyhow};
use block_client::{
    BlockClient as _, BlockDeviceFlag, BufferSlice, MutableBufferSlice, ReadOptions,
    RemoteBlockClient, VmoId, WriteOptions,
};
use block_server::BlockServer;
use block_server::async_interface::SessionManager;

use fidl::endpoints::ServerEnd;
use fuchsia_sync::Mutex;
use futures::stream::TryStreamExt as _;
use std::collections::BTreeMap;
use std::num::NonZero;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Weak};
use {
    fidl_fuchsia_storage_block as fblock, fidl_fuchsia_storage_partitions as fpartitions,
    fuchsia_async as fasync,
};

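/// Formats the name under which the partition at `index` is published, e.g. "part-001".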
fn partition_directory_entry_name(index: u32) -> String {
    format!("part-{:03}", index)
}

/// A single partition in a GPT device.
pub struct GptPartition {
    gpt: Weak<GptManager>,
    info: Mutex<gpt::PartitionInfo>,
    block_client: Arc<RemoteBlockClient>,
}

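/// Extracts the raw trace ID from an optional trace flow ID, defaulting to 0 when absent.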
fn trace_id(trace_flow_id: Option<NonZero<u64>>) -> u64 {
    trace_flow_id.map(|v| v.get()).unwrap_or_default()
}

impl GptPartition {
    pub fn new(
        gpt: &Arc<GptManager>,
        block_client: Arc<RemoteBlockClient>,
        info: gpt::PartitionInfo,
    ) -> Arc<Self> {
        Arc::new(Self { gpt: Arc::downgrade(gpt), info: Mutex::new(info), block_client })
    }

    pub async fn terminate(&self) {
        if let Err(error) = self.block_client.close().await {
            log::warn!(error:?; "Failed to close block client");
        }
    }

    /// Replaces the partition info, returning its old value.
    pub fn update_info(&self, info: gpt::PartitionInfo) -> gpt::PartitionInfo {
        std::mem::replace(&mut *self.info.lock(), info)
    }

    pub fn block_size(&self) -> u32 {
        self.block_client.block_size()
    }

    pub fn block_count(&self) -> u64 {
        self.info.lock().num_blocks
    }

    pub async fn attach_vmo(&self, vmo: &zx::Vmo) -> Result<VmoId, zx::Status> {
        self.block_client.attach_vmo(vmo).await
    }

    pub async fn detach_vmo(&self, vmoid: VmoId) -> Result<(), zx::Status> {
        self.block_client.detach_vmo(vmoid).await
    }

    pub fn open_passthrough_session(&self, session: ServerEnd<fblock::SessionMarker>) {
        if let Some(gpt) = self.gpt.upgrade() {
            let mapping = {
                let info = self.info.lock();
                fblock::BlockOffsetMapping {
                    source_block_offset: 0,
                    target_block_offset: info.start_block,
                    length: info.num_blocks,
                }
            };
            if let Err(err) = gpt.block_proxy.open_session_with_offset_map(session, &mapping) {
                // Client errors normally come back on `session` but that was already consumed.  The
                // client will get a PEER_CLOSED without an epitaph.
                log::warn!(err:?; "Failed to open passthrough session");
            }
        } else if let Err(err) = session.close_with_epitaph(zx::Status::BAD_STATE) {
            log::warn!(err:?; "Failed to send session epitaph");
        }
    }

    pub fn get_info(&self) -> block_server::DeviceInfo {
        convert_partition_info(
            &*self.info.lock(),
            self.block_client.block_flags(),
            self.block_client.max_transfer_blocks(),
        )
    }

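    /// Reads `block_count` blocks starting at `device_block_offset` (relative to the start of
    /// the partition) into the VMO registered as `vmo_id`, at `vmo_offset` bytes.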
    pub async fn read(
        &self,
        device_block_offset: u64,
        block_count: u32,
        vmo_id: &VmoId,
        vmo_offset: u64, // *bytes* not blocks
        opts: ReadOptions,
        trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        let dev_offset = self
            .absolute_offset(device_block_offset, block_count)
            .map(|offset| offset * self.block_size() as u64)?;
        // Widen to u64 before multiplying to avoid overflowing u32.
        let buffer = MutableBufferSlice::new_with_vmo_id(
            vmo_id,
            vmo_offset,
            block_count as u64 * self.block_size() as u64,
        );
        self.block_client
            .read_at_with_opts_traced(buffer, dev_offset, opts, trace_id(trace_flow_id))
            .await
    }

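    /// Writes `block_count` blocks starting at `device_block_offset` (relative to the start of
    /// the partition) from the VMO registered as `vmo_id`, at `vmo_offset` bytes.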
    pub async fn write(
        &self,
        device_block_offset: u64,
        block_count: u32,
        vmo_id: &VmoId,
        vmo_offset: u64, // *bytes* not blocks
        opts: WriteOptions,
        trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        let dev_offset = self
            .absolute_offset(device_block_offset, block_count)
            .map(|offset| offset * self.block_size() as u64)?;
        // Widen to u64 before multiplying to avoid overflowing u32.
        let buffer = BufferSlice::new_with_vmo_id(
            vmo_id,
            vmo_offset,
            block_count as u64 * self.block_size() as u64,
        );
        self.block_client
            .write_at_with_opts_traced(buffer, dev_offset, opts, trace_id(trace_flow_id))
            .await
    }

    pub async fn flush(&self, trace_flow_id: Option<NonZero<u64>>) -> Result<(), zx::Status> {
        self.block_client.flush_traced(trace_id(trace_flow_id)).await
    }

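    /// Trims `block_count` blocks starting at `device_block_offset` (relative to the start of
    /// the partition).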
    pub async fn trim(
        &self,
        device_block_offset: u64,
        block_count: u32,
        trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        let dev_offset = self
            .absolute_offset(device_block_offset, block_count)
            .map(|offset| offset * self.block_size() as u64)?;
        let len = block_count as u64 * self.block_size() as u64;
        self.block_client.trim_traced(dev_offset..dev_offset + len, trace_id(trace_flow_id)).await
    }

    // Converts a relative range specified by [offset, offset+len) into an absolute offset in the
    // GPT device, performing bounds checking within the partition.  Returns ZX_ERR_OUT_OF_RANGE for
    // an invalid offset/len.
    fn absolute_offset(&self, mut offset: u64, len: u32) -> Result<u64, zx::Status> {
        let info = self.info.lock();
        offset = offset.checked_add(info.start_block).ok_or(zx::Status::OUT_OF_RANGE)?;
        let end = offset.checked_add(len as u64).ok_or(zx::Status::OUT_OF_RANGE)?;
        if end > info.start_block + info.num_blocks {
            Err(zx::Status::OUT_OF_RANGE)
        } else {
            Ok(offset)
        }
    }
}

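/// Translates the GPT metadata for a partition into the representation used by the block server.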
fn convert_partition_info(
    info: &gpt::PartitionInfo,
    device_flags: BlockDeviceFlag,
    max_transfer_blocks: Option<NonZero<u32>>,
) -> block_server::DeviceInfo {
    block_server::DeviceInfo::Partition(block_server::PartitionInfo {
        device_flags,
        max_transfer_blocks,
        block_range: Some(info.start_block..info.start_block + info.num_blocks),
        type_guid: info.type_guid.to_bytes(),
        instance_guid: info.instance_guid.to_bytes(),
        name: info.label.clone(),
        flags: info.flags,
    })
}

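/// Returns true if `b` starts immediately after `a` ends, in which case the two partitions can
/// be exposed as a single merged partition.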
fn can_merge(a: &gpt::PartitionInfo, b: &gpt::PartitionInfo) -> bool {
    a.start_block + a.num_blocks == b.start_block
}

struct PendingTransaction {
    transaction: gpt::Transaction,
    client_koid: zx::Koid,
    // A list of indexes for partitions which were added in the transaction.  When committing, all
    // newly created partitions are published.
    added_partitions: Vec<u32>,
    // A task which waits for the client end to be closed and clears the pending transaction.
    _signal_task: fasync::Task<()>,
}

struct Inner {
    gpt: gpt::Gpt,
    partitions: BTreeMap<u32, Arc<BlockServer<SessionManager<PartitionBackend>>>>,
    // We track these separately so that we do not update them during transaction commit.
    overlay_partitions: BTreeMap<u32, Arc<BlockServer<SessionManager<PartitionBackend>>>>,
    // Exposes all partitions for discovery by other components.  Should be kept in sync with
    // `partitions`.
    partitions_dir: PartitionsDirectory,
    pending_transaction: Option<PendingTransaction>,
}

impl Inner {
    /// Ensures that `transaction` matches our pending transaction.
    fn ensure_transaction_matches(&self, transaction: &zx::EventPair) -> Result<(), zx::Status> {
        if let Some(pending) = self.pending_transaction.as_ref() {
            if transaction.koid()? == pending.client_koid {
                Ok(())
            } else {
                Err(zx::Status::BAD_HANDLE)
            }
        } else {
            Err(zx::Status::BAD_STATE)
        }
    }

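    /// Creates a block server for the partition at `index` and publishes it in
    /// `partitions_dir`.  A non-empty `overlay_indexes` publishes the partition as an overlay
    /// spanning those GPT entries instead of as a regular partition.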
    fn bind_partition(
        &mut self,
        parent: &Arc<GptManager>,
        index: u32,
        info: gpt::PartitionInfo,
        overlay_indexes: Vec<usize>,
    ) -> Result<(), Error> {
        log::trace!(
            "GPT part {index}{}: {info:?}",
            if !overlay_indexes.is_empty() { " (overlay)" } else { "" }
        );
        info.start_block
            .checked_add(info.num_blocks)
            .ok_or_else(|| anyhow!("Overflow in partition end"))?;
        let partition =
            PartitionBackend::new(GptPartition::new(parent, self.gpt.client().clone(), info));
        let block_server = Arc::new(BlockServer::new(parent.block_size, partition));
        if !overlay_indexes.is_empty() {
            self.partitions_dir.add_overlay(
                &partition_directory_entry_name(index),
                Arc::downgrade(&block_server),
                Arc::downgrade(parent),
                overlay_indexes,
            );
            self.overlay_partitions.insert(index, block_server);
        } else {
            self.partitions_dir.add_partition(
                &partition_directory_entry_name(index),
                Arc::downgrade(&block_server),
                Arc::downgrade(parent),
                index as usize,
            );
            self.partitions.insert(index, block_server);
        }
        Ok(())
    }

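    /// Binds a single overlay partition that spans the contiguous `super` and `userdata`
    /// partitions, presenting them to clients as one block device.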
    fn bind_super_and_userdata_partition(
        &mut self,
        parent: &Arc<GptManager>,
        super_partition: (u32, gpt::PartitionInfo),
        userdata_partition: (u32, gpt::PartitionInfo),
    ) -> Result<(), Error> {
        let info = gpt::PartitionInfo {
            // TODO(https://fxbug.dev/443980711): This should come from configuration.
            label: "super_and_userdata".to_string(),
            type_guid: super_partition.1.type_guid.clone(),
            instance_guid: super_partition.1.instance_guid.clone(),
            start_block: super_partition.1.start_block,
            num_blocks: super_partition.1.num_blocks + userdata_partition.1.num_blocks,
            flags: super_partition.1.flags,
        };
        log::trace!(
            "GPT merged parts {:?} + {:?} -> {info:?}",
            super_partition.1,
            userdata_partition.1
        );
        self.bind_partition(
            parent,
            super_partition.0,
            info,
            vec![super_partition.0 as usize, userdata_partition.0 as usize],
        )
    }

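    /// Rebuilds the set of served partitions from the current GPT contents, clearing any
    /// previously bound partitions first.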
    fn bind_all_partitions(&mut self, parent: &Arc<GptManager>) -> Result<(), Error> {
        self.partitions.clear();
        self.overlay_partitions.clear();
        self.partitions_dir.clear();

        let mut partitions = self.gpt.partitions().clone();
        if parent.config.merge_super_and_userdata {
            // Attempt to merge the first `super` and `userdata` we find.  The rest will be treated
            // as regular partitions.
            let super_part = match partitions
                .iter()
                .find(|(_, info)| info.label == "super")
                .map(|(index, _)| *index)
            {
                Some(index) => partitions.remove_entry(&index),
                None => None,
            };
            let userdata_part = match partitions
                .iter()
                .find(|(_, info)| info.label == "userdata")
                .map(|(index, _)| *index)
            {
                Some(index) => partitions.remove_entry(&index),
                None => None,
            };
            if super_part.is_some() && userdata_part.is_some() {
                let super_part = super_part.unwrap();
                let userdata_part = userdata_part.unwrap();
                if can_merge(&super_part.1, &userdata_part.1) {
                    self.bind_super_and_userdata_partition(parent, super_part, userdata_part)?;
                } else {
                    log::warn!("super/userdata cannot be merged");
                    self.bind_partition(parent, super_part.0, super_part.1, vec![])?;
                    self.bind_partition(parent, userdata_part.0, userdata_part.1, vec![])?;
                }
            } else if super_part.is_some() || userdata_part.is_some() {
                log::warn!("Only one of super/userdata found; not merging");
                let (index, info) = super_part.or(userdata_part).unwrap();
                self.bind_partition(parent, index, info, vec![])?;
            }
        }
        for (index, info) in partitions {
            self.bind_partition(parent, index, info, vec![])?;
        }
        Ok(())
    }

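    /// Adds `info` to the pending transaction, recording the new partition's index so it gets
    /// published on commit.  Panics if there is no pending transaction.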
    fn add_partition(&mut self, info: gpt::PartitionInfo) -> Result<usize, gpt::AddPartitionError> {
        let pending = self.pending_transaction.as_mut().unwrap();
        let idx = self.gpt.add_partition(&mut pending.transaction, info)?;
        pending.added_partitions.push(idx as u32);
        Ok(idx)
    }
}

/// Runs a GPT device.
pub struct GptManager {
    config: Config,
    block_proxy: fblock::BlockProxy,
    block_size: u32,
    block_count: u64,
    inner: futures::lock::Mutex<Inner>,
    shutdown: AtomicBool,
}

impl std::fmt::Debug for GptManager {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
        f.debug_struct("GptManager")
            .field("block_size", &self.block_size)
            .field("block_count", &self.block_count)
            .finish()
    }
}

impl GptManager {
    pub async fn new(
        block_proxy: fblock::BlockProxy,
        partitions_dir: Arc<vfs::directory::immutable::Simple>,
    ) -> Result<Arc<Self>, Error> {
        Self::new_with_config(block_proxy, partitions_dir, Config::default()).await
    }

    pub async fn new_with_config(
        block_proxy: fblock::BlockProxy,
        partitions_dir: Arc<vfs::directory::immutable::Simple>,
        config: Config,
    ) -> Result<Arc<Self>, Error> {
        log::info!("Binding to GPT");
        let client = Arc::new(RemoteBlockClient::new(block_proxy.clone()).await?);
        let block_size = client.block_size();
        let block_count = client.block_count();
        let gpt = gpt::Gpt::open(client).await.context("Failed to load GPT")?;

        let this = Arc::new(Self {
            config,
            block_proxy,
            block_size,
            block_count,
            inner: futures::lock::Mutex::new(Inner {
                gpt,
                partitions: BTreeMap::new(),
                overlay_partitions: BTreeMap::new(),
                partitions_dir: PartitionsDirectory::new(partitions_dir),
                pending_transaction: None,
            }),
            shutdown: AtomicBool::new(false),
        });
        this.inner.lock().await.bind_all_partitions(&this)?;
        log::info!("Starting all partitions OK!");
        Ok(this)
    }

    pub fn block_size(&self) -> u32 {
        self.block_size
    }

    pub fn block_count(&self) -> u64 {
        self.block_count
    }

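    /// Creates a new transaction for modifying the partition table.  Only one transaction may
    /// be active at a time.  The returned event pair is the client's handle to the transaction;
    /// closing it aborts the transaction.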
    pub async fn create_transaction(self: &Arc<Self>) -> Result<zx::EventPair, zx::Status> {
        let mut inner = self.inner.lock().await;
        if inner.pending_transaction.is_some() {
            return Err(zx::Status::ALREADY_EXISTS);
        }
        let transaction = inner.gpt.create_transaction().unwrap();
        let (client_end, server_end) = zx::EventPair::create();
        let client_koid = client_end.koid()?;
        let signal_waiter = fasync::OnSignals::new(server_end, zx::Signals::EVENTPAIR_PEER_CLOSED);
        let this = self.clone();
        let task = fasync::Task::spawn(async move {
            let _ = signal_waiter.await;
            let mut inner = this.inner.lock().await;
            if inner.pending_transaction.as_ref().map_or(false, |t| t.client_koid == client_koid) {
                inner.pending_transaction = None;
            }
        });
        inner.pending_transaction = Some(PendingTransaction {
            transaction,
            client_koid,
            added_partitions: vec![],
            _signal_task: task,
        });
        Ok(client_end)
    }

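    /// Commits the changes staged in `transaction` to disk and updates the served partitions to
    /// match.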
    pub async fn commit_transaction(
        self: &Arc<Self>,
        transaction: zx::EventPair,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(&transaction)?;
        let pending = std::mem::take(&mut inner.pending_transaction).unwrap();
        let partitions = pending.transaction.partitions.clone();
        if let Err(err) = inner.gpt.commit_transaction(pending.transaction).await {
            log::warn!(err:?; "Failed to commit transaction");
            return Err(zx::Status::IO);
        }
        // Everything after this point should be infallible.
        for (info, idx) in partitions
            .iter()
            .zip(0u32..)
            .filter(|(info, idx)| !info.is_nil() && !pending.added_partitions.contains(idx))
        {
            // Some physical partitions are not tracked in `inner.partitions` (e.g. when we use an
            // overlay partition to combine two physical partitions).  In this case, we still need
            // to propagate the info in the underlying transaction, but there's no need to update
            // the in-memory info.
            // Note that overlay partitions can't be changed by transactions anyway, so the info
            // we propagate should be exactly what it was when we created the transaction.
            if let Some(part) = inner.partitions.get(&idx) {
                part.session_manager().interface().update_info(info.clone());
            }
        }
        for idx in pending.added_partitions {
            if let Some(info) = inner.gpt.partitions().get(&idx).cloned() {
                if let Err(err) = inner.bind_partition(self, idx, info, vec![]) {
                    log::error!(err:?; "Failed to bind partition");
                }
            }
        }
        Ok(())
    }

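    /// Stages a new partition in the pending transaction.  `request` must carry a name, type
    /// GUID, and block count; a missing instance GUID is generated, and the start block is
    /// allocated automatically.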
    pub async fn add_partition(
        &self,
        request: fpartitions::PartitionsManagerAddPartitionRequest,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(
            request.transaction.as_ref().ok_or(zx::Status::BAD_HANDLE)?,
        )?;
        let info = gpt::PartitionInfo {
            label: request.name.ok_or(zx::Status::INVALID_ARGS)?,
            type_guid: request
                .type_guid
                .map(|value| gpt::Guid::from_bytes(value.value))
                .ok_or(zx::Status::INVALID_ARGS)?,
            instance_guid: request
                .instance_guid
                .map(|value| gpt::Guid::from_bytes(value.value))
                .unwrap_or_else(|| gpt::Guid::generate()),
            start_block: 0,
            num_blocks: request.num_blocks.ok_or(zx::Status::INVALID_ARGS)?,
            flags: request.flags.unwrap_or_default(),
        };
        let idx = inner.add_partition(info)?;
        let partition =
            inner.pending_transaction.as_ref().unwrap().transaction.partitions.get(idx).unwrap();
        log::info!(
            "Allocated partition {:?} at {:?}",
            partition.label,
            partition.start_block..partition.start_block + partition.num_blocks
        );
        Ok(())
    }

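    /// Serves fuchsia.storage.partitions.Partition requests for the partition at `gpt_index`.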
    pub async fn handle_partitions_requests(
        &self,
        gpt_index: usize,
        mut requests: fpartitions::PartitionRequestStream,
    ) -> Result<(), zx::Status> {
        while let Some(request) = requests.try_next().await.unwrap() {
            match request {
                fpartitions::PartitionRequest::UpdateMetadata { payload, responder } => {
                    responder
                        .send(
                            self.update_partition_metadata(gpt_index, payload)
                                .await
                                .map_err(|status| status.into_raw()),
                        )
                        .unwrap_or_else(
                            |err| log::error!(err:?; "Failed to send UpdateMetadata response"),
                        );
                }
            }
        }
        Ok(())
    }

    async fn update_partition_metadata(
        &self,
        gpt_index: usize,
        request: fpartitions::PartitionUpdateMetadataRequest,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(
            request.transaction.as_ref().ok_or(zx::Status::BAD_HANDLE)?,
        )?;

        let transaction = &mut inner.pending_transaction.as_mut().unwrap().transaction;
        let entry = transaction.partitions.get_mut(gpt_index).ok_or(zx::Status::BAD_STATE)?;
        if let Some(type_guid) = request.type_guid.as_ref().cloned() {
            entry.type_guid = gpt::Guid::from_bytes(type_guid.value);
        }
        if let Some(flags) = request.flags.as_ref() {
            entry.flags = *flags;
        }
        Ok(())
    }

    pub async fn handle_overlay_partitions_requests(
        &self,
        gpt_indexes: Vec<usize>,
        mut requests: fpartitions::OverlayPartitionRequestStream,
    ) -> Result<(), zx::Status> {
        while let Some(request) = requests.try_next().await.unwrap() {
            match request {
                fpartitions::OverlayPartitionRequest::GetPartitions { responder } => {
                    match self.get_overlay_partition_info(&gpt_indexes[..]).await {
                        Ok(partitions) => responder.send(Ok(&partitions[..])),
                        Err(status) => responder.send(Err(status.into_raw())),
                    }
                    .unwrap_or_else(
                        |err| log::error!(err:?; "Failed to send GetPartitions response"),
                    );
                }
            }
        }
        Ok(())
    }

    async fn get_overlay_partition_info(
        &self,
        gpt_indexes: &[usize],
    ) -> Result<Vec<fpartitions::PartitionInfo>, zx::Status> {
        fn convert_partition_info(info: &gpt::PartitionInfo) -> fpartitions::PartitionInfo {
            fpartitions::PartitionInfo {
                name: info.label.to_string(),
                type_guid: fblock::Guid { value: info.type_guid.to_bytes() },
                instance_guid: fblock::Guid { value: info.instance_guid.to_bytes() },
                start_block: info.start_block,
                num_blocks: info.num_blocks,
                flags: info.flags,
            }
        }

        let inner = self.inner.lock().await;
        let mut partitions = vec![];
        for index in gpt_indexes {
            let index: u32 = *index as u32;
            partitions.push(
                inner
                    .gpt
                    .partitions()
                    .get(&index)
                    .map(convert_partition_info)
                    .ok_or(zx::Status::BAD_STATE)?,
            );
        }
        Ok(partitions)
    }

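    /// Replaces the entire partition table with `partitions` and rebinds everything.  This is
    /// destructive and fails if a transaction is pending.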
    pub async fn reset_partition_table(
        self: &Arc<Self>,
        partitions: Vec<gpt::PartitionInfo>,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        if inner.pending_transaction.is_some() {
            return Err(zx::Status::BAD_STATE);
        }

        log::info!("Resetting gpt.  Expect data loss!!!");
        let mut transaction = inner.gpt.create_transaction().unwrap();
        transaction.partitions = partitions;
        inner.gpt.commit_transaction(transaction).await?;

        if let Err(err) = inner.bind_all_partitions(self) {
            log::error!(err:?; "Failed to rebind partitions");
            return Err(zx::Status::BAD_STATE);
        }
        log::info!("Rebinding partitions OK!");
        Ok(())
    }

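    /// Stops serving all partitions.  This must be called before the `GptManager` is dropped.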
    pub async fn shutdown(self: Arc<Self>) {
        log::info!("Shutting down gpt");
        let mut inner = self.inner.lock().await;
        inner.partitions_dir.clear();
        inner.partitions.clear();
        inner.overlay_partitions.clear();
        self.shutdown.store(true, Ordering::Relaxed);
        log::info!("Shutting down gpt OK");
    }
}

impl Drop for GptManager {
    fn drop(&mut self) {
        assert!(self.shutdown.load(Ordering::Relaxed), "Did you forget to shutdown?");
    }
}

#[cfg(test)]
mod tests {
    use super::GptManager;
    use block_client::{
        BlockClient as _, BlockDeviceFlag, BufferSlice, MutableBufferSlice, RemoteBlockClient,
        WriteFlags,
    };
    use block_server::{BlockInfo, DeviceInfo, WriteOptions};
    use fidl::HandleBased as _;
    use fuchsia_component::client::connect_to_named_protocol_at_dir_root;
    use gpt::{Gpt, Guid, PartitionInfo};
    use std::num::NonZero;
    use std::sync::Arc;
    use std::sync::atomic::{AtomicBool, Ordering};
    use vmo_backed_block_server::{
        InitialContents, VmoBackedServer, VmoBackedServerOptions, VmoBackedServerTestingExt as _,
    };
    use {
        fidl_fuchsia_io as fio, fidl_fuchsia_storage_block as fblock,
        fidl_fuchsia_storage_partitions as fpartitions, fuchsia_async as fasync,
    };

    async fn setup(
        block_size: u32,
        block_count: u64,
        partitions: Vec<PartitionInfo>,
    ) -> (Arc<VmoBackedServer>, Arc<vfs::directory::immutable::Simple>) {
        setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(block_count),
                block_size,
                ..Default::default()
            },
            partitions,
        )
        .await
    }

    async fn setup_with_options(
        opts: VmoBackedServerOptions<'_>,
        partitions: Vec<PartitionInfo>,
    ) -> (Arc<VmoBackedServer>, Arc<vfs::directory::immutable::Simple>) {
        let server = Arc::new(opts.build().unwrap());
        {
            let (block_client, block_server) =
                fidl::endpoints::create_proxy::<fblock::BlockMarker>();
            let volume_stream = fidl::endpoints::ServerEnd::<fblock::BlockMarker>::from(
                block_server.into_channel(),
            )
            .into_stream();
            let server_clone = server.clone();
            let _task = fasync::Task::spawn(async move { server_clone.serve(volume_stream).await });
            let client = Arc::new(RemoteBlockClient::new(block_client).await.unwrap());
            Gpt::format(client, partitions).await.unwrap();
        }
        (server, vfs::directory::immutable::simple())
    }

    #[fuchsia::test]
    async fn load_unformatted_gpt() {
        let vmo = zx::Vmo::create(4096).unwrap();
        let server = Arc::new(VmoBackedServer::from_vmo(512, vmo));

        GptManager::new(server.connect(), vfs::directory::immutable::simple())
            .await
            .expect_err("load should fail");
    }

    #[fuchsia::test]
    async fn load_formatted_empty_gpt() {
        let (block_device, partitions_dir) = setup(512, 8, vec![]).await;

        let runner = GptManager::new(block_device.connect(), partitions_dir)
            .await
            .expect("load should succeed");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_one_partition() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").map(|_| ()).expect_err("Extra entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_two_partitions() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        partitions_dir.get_entry("part-002").map(|_| ()).expect_err("Extra entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn partition_io() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 2,
                flags: 0,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block = connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&proxy, "volume")
            .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        assert_eq!(client.block_count(), 2);
        assert_eq!(client.block_size(), 512);

        let buf = vec![0xabu8; 512];
        client.write_at(BufferSlice::Memory(&buf[..]), 0).await.expect("write_at failed");
        client
            .write_at(BufferSlice::Memory(&buf[..]), 1024)
            .await
            .expect_err("write_at should fail when writing past partition end");
        let mut buf2 = vec![0u8; 512];
        client.read_at(MutableBufferSlice::Memory(&mut buf2[..]), 0).await.expect("read_at failed");
        assert_eq!(buf, buf2);
        client
            .read_at(MutableBufferSlice::Memory(&mut buf2[..]), 1024)
            .await
            .expect_err("read_at should fail when reading past partition end");
        client.trim(512..1024).await.expect("trim failed");
        client.trim(1..512).await.expect_err("trim with invalid range should fail");
        client.trim(1024..1536).await.expect_err("trim past end of partition should fail");
        runner.shutdown().await;

        // Ensure writes persisted to the partition.
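        // The partition starts at device block 4, so device byte offset 2048 (with a 512-byte
        // block size) corresponds to partition block 0.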
        let mut buf = vec![0u8; 512];
        let client =
            RemoteBlockClient::new(block_device.connect::<fblock::BlockProxy>()).await.unwrap();
        client.read_at(MutableBufferSlice::Memory(&mut buf[..]), 2048).await.unwrap();
        assert_eq!(&buf[..], &[0xabu8; 512]);
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_invalid_primary_header() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        {
            let (client, stream) =
                fidl::endpoints::create_proxy_and_stream::<fblock::BlockMarker>();
            let server = block_device.clone();
            let _task = fasync::Task::spawn(async move { server.serve(stream).await });
            let client = RemoteBlockClient::new(client).await.unwrap();
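            // Corrupt the primary GPT header (block 1).  Loading should still succeed via the
            // backup copy.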
            client.write_at(BufferSlice::Memory(&[0xffu8; 512]), 512).await.unwrap();
        }

        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_invalid_primary_partition_table() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        {
            let (client, stream) =
                fidl::endpoints::create_proxy_and_stream::<fblock::BlockMarker>();
            let server = block_device.clone();
            let _task = fasync::Task::spawn(async move { server.serve(stream).await });
            let client = RemoteBlockClient::new(client).await.unwrap();
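            // Corrupt the primary partition table (starting at block 2).  Loading should still
            // succeed via the backup copy.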
954            client.write_at(BufferSlice::Memory(&[0xffu8; 512]), 1024).await.unwrap();
955        }
956
957        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
958            .await
959            .expect("load should succeed");
960        partitions_dir.get_entry("part-000").expect("No entry found");
961        partitions_dir.get_entry("part-001").expect("No entry found");
962        runner.shutdown().await;
963    }
964
965    #[fuchsia::test]
966    async fn force_access_passed_through() {
967        const BLOCK_SIZE: u32 = 512;
968        const BLOCK_COUNT: u64 = 1024;
969
970        struct Observer(Arc<AtomicBool>);
971
972        impl vmo_backed_block_server::Observer for Observer {
973            fn write(
974                &self,
975                _device_block_offset: u64,
976                _block_count: u32,
977                _vmo: &Arc<zx::Vmo>,
978                _vmo_offset: u64,
979                opts: WriteOptions,
980            ) -> vmo_backed_block_server::WriteAction {
981                assert_eq!(
982                    opts.flags.contains(WriteFlags::FORCE_ACCESS),
983                    self.0.load(Ordering::Relaxed)
984                );
985                vmo_backed_block_server::WriteAction::Write
986            }
987        }
988
989        let expect_force_access = Arc::new(AtomicBool::new(false));
990        let (server, partitions_dir) = setup_with_options(
991            VmoBackedServerOptions {
992                initial_contents: InitialContents::FromCapacity(BLOCK_COUNT),
993                block_size: BLOCK_SIZE,
994                observer: Some(Box::new(Observer(expect_force_access.clone()))),
995                info: DeviceInfo::Block(BlockInfo {
996                    device_flags: fblock::DeviceFlag::FUA_SUPPORT,
997                    ..Default::default()
998                }),
999                ..Default::default()
1000            },
1001            vec![PartitionInfo {
1002                label: "foo".to_string(),
1003                type_guid: Guid::from_bytes([1; 16]),
1004                instance_guid: Guid::from_bytes([2; 16]),
1005                start_block: 4,
1006                num_blocks: 1,
1007                flags: 0,
1008            }],
1009        )
1010        .await;
1011
1012        let manager = GptManager::new(server.connect(), partitions_dir.clone()).await.unwrap();
1013
1014        let proxy = vfs::serve_directory(
1015            partitions_dir.clone(),
1016            vfs::path::Path::validate_and_split("part-000").unwrap(),
1017            fio::PERM_READABLE,
1018        );
1019        let block = connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&proxy, "volume")
1020            .expect("Failed to open block service");
1021        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");
1022
1023        let buffer = vec![0; BLOCK_SIZE as usize];
1024        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();
1025
1026        expect_force_access.store(true, Ordering::Relaxed);
1027
1028        client
1029            .write_at_with_opts(
1030                BufferSlice::Memory(&buffer),
1031                0,
1032                WriteOptions { flags: WriteFlags::FORCE_ACCESS, ..Default::default() },
1033            )
1034            .await
1035            .unwrap();
1036
1037        manager.shutdown().await;
1038    }
1039
1040    #[fuchsia::test]
1041    async fn barrier_passed_through() {
1042        const BLOCK_SIZE: u32 = 512;
1043        const BLOCK_COUNT: u64 = 1024;
1044
1045        struct Observer(Arc<AtomicBool>);
1046
1047        impl vmo_backed_block_server::Observer for Observer {
1048            fn write(
1049                &self,
1050                _device_block_offset: u64,
1051                _block_count: u32,
1052                _vmo: &Arc<zx::Vmo>,
1053                _vmo_offset: u64,
1054                opts: WriteOptions,
1055            ) -> vmo_backed_block_server::WriteAction {
1056                assert_eq!(
1057                    opts.flags.contains(WriteFlags::PRE_BARRIER),
1058                    self.0.load(Ordering::Relaxed)
1059                );
1060                vmo_backed_block_server::WriteAction::Write
1061            }
1062        }
1063
1064        let expect_barrier = Arc::new(AtomicBool::new(false));
1065        let (server, partitions_dir) = setup_with_options(
1066            VmoBackedServerOptions {
1067                initial_contents: InitialContents::FromCapacity(BLOCK_COUNT),
1068                block_size: BLOCK_SIZE,
1069                observer: Some(Box::new(Observer(expect_barrier.clone()))),
1070                info: DeviceInfo::Block(BlockInfo {
1071                    device_flags: fblock::DeviceFlag::BARRIER_SUPPORT,
1072                    ..Default::default()
1073                }),
1074                ..Default::default()
1075            },
1076            vec![PartitionInfo {
1077                label: "foo".to_string(),
1078                type_guid: Guid::from_bytes([1; 16]),
1079                instance_guid: Guid::from_bytes([2; 16]),
1080                start_block: 4,
1081                num_blocks: 1,
1082                flags: 0,
1083            }],
1084        )
1085        .await;
1086
1087        let manager = GptManager::new(server.connect(), partitions_dir.clone()).await.unwrap();
1088
1089        let proxy = vfs::serve_directory(
1090            partitions_dir.clone(),
1091            vfs::path::Path::validate_and_split("part-000").unwrap(),
1092            fio::PERM_READABLE,
1093        );
1094        let block = connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&proxy, "volume")
1095            .expect("Failed to open block service");
1096        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");
1097
1098        let buffer = vec![0; BLOCK_SIZE as usize];
1099        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();
1100
1101        expect_barrier.store(true, Ordering::Relaxed);
1102        client.barrier();
1103        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();
1104
1105        manager.shutdown().await;
1106    }
1107
1108    #[fuchsia::test]
1109    async fn commit_transaction() {
1110        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
1111        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
1112        const PART_1_NAME: &str = "part";
1113        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
1114        const PART_2_NAME: &str = "part2";
1115
1116        let (block_device, partitions_dir) = setup(
1117            512,
1118            16,
1119            vec![
1120                PartitionInfo {
1121                    label: PART_1_NAME.to_string(),
1122                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
1123                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
1124                    start_block: 4,
1125                    num_blocks: 1,
1126                    flags: 0,
1127                },
1128                PartitionInfo {
1129                    label: PART_2_NAME.to_string(),
1130                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
1131                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
1132                    start_block: 5,
1133                    num_blocks: 1,
1134                    flags: 0,
1135                },
1136            ],
1137        )
1138        .await;
1139        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
1140            .await
1141            .expect("load should succeed");
1142
1143        let part_0_dir = vfs::serve_directory(
1144            partitions_dir.clone(),
1145            vfs::Path::validate_and_split("part-000").unwrap(),
1146            fio::PERM_READABLE,
1147        );
1148        let part_1_dir = vfs::serve_directory(
1149            partitions_dir.clone(),
1150            vfs::Path::validate_and_split("part-001").unwrap(),
1151            fio::PERM_READABLE,
1152        );
1153        let part_0_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
1154            &part_0_dir,
1155            "partition",
1156        )
1157        .expect("Failed to open Partition service");
1158        let part_1_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
1159            &part_1_dir,
1160            "partition",
1161        )
1162        .expect("Failed to open Partition service");
1163
1164        let transaction = runner.create_transaction().await.expect("Failed to create transaction");
1165        part_0_proxy
1166            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
1167                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
1168                type_guid: Some(fblock::Guid { value: [0xffu8; 16] }),
1169                ..Default::default()
1170            })
1171            .await
1172            .expect("FIDL error")
1173            .expect("Failed to update_metadata");
1174        part_1_proxy
1175            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
1176                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
1177                flags: Some(1234),
1178                ..Default::default()
1179            })
1180            .await
1181            .expect("FIDL error")
1182            .expect("Failed to update_metadata");
1183        runner.commit_transaction(transaction).await.expect("Failed to commit transaction");
1184
1185        // Ensure the changes have propagated to the correct partitions.
1186        let part_0_block =
1187            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_0_dir, "volume")
1188                .expect("Failed to open Volume service");
1189        let (status, guid) = part_0_block.get_type_guid().await.expect("FIDL error");
1190        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
1191        assert_eq!(guid.unwrap().value, [0xffu8; 16]);
1192        let part_1_block =
1193            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_1_dir, "volume")
1194                .expect("Failed to open Volume service");
1195        let metadata =
1196            part_1_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
1197        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
1198        assert_eq!(metadata.flags, Some(1234));
1199
1200        runner.shutdown().await;
1201    }
1202
1203    #[fuchsia::test]
1204    async fn commit_transaction_with_io_error() {
1205        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
1206        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
1207        const PART_1_NAME: &str = "part";
1208        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
1209        const PART_2_NAME: &str = "part2";
1210
1211        #[derive(Clone)]
1212        struct Observer(Arc<AtomicBool>);
1213        impl vmo_backed_block_server::Observer for Observer {
1214            fn write(
1215                &self,
1216                _device_block_offset: u64,
1217                _block_count: u32,
1218                _vmo: &Arc<zx::Vmo>,
1219                _vmo_offset: u64,
1220                _opts: WriteOptions,
1221            ) -> vmo_backed_block_server::WriteAction {
1222                if self.0.load(Ordering::Relaxed) {
1223                    vmo_backed_block_server::WriteAction::Fail
1224                } else {
1225                    vmo_backed_block_server::WriteAction::Write
1226                }
1227            }
1228        }
1229        let observer = Observer(Arc::new(AtomicBool::new(false)));
1230        let (block_device, partitions_dir) = setup_with_options(
1231            VmoBackedServerOptions {
1232                initial_contents: InitialContents::FromCapacity(16),
1233                block_size: 512,
1234                observer: Some(Box::new(observer.clone())),
1235                ..Default::default()
1236            },
1237            vec![
1238                PartitionInfo {
1239                    label: PART_1_NAME.to_string(),
1240                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
1241                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
1242                    start_block: 4,
1243                    num_blocks: 1,
1244                    flags: 0,
1245                },
1246                PartitionInfo {
1247                    label: PART_2_NAME.to_string(),
1248                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
1249                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
1250                    start_block: 5,
1251                    num_blocks: 1,
1252                    flags: 0,
1253                },
1254            ],
1255        )
1256        .await;
1257        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
1258            .await
1259            .expect("load should succeed");
1260
1261        let part_0_dir = vfs::serve_directory(
1262            partitions_dir.clone(),
1263            vfs::Path::validate_and_split("part-000").unwrap(),
1264            fio::PERM_READABLE,
1265        );
1266        let part_1_dir = vfs::serve_directory(
1267            partitions_dir.clone(),
1268            vfs::Path::validate_and_split("part-001").unwrap(),
1269            fio::PERM_READABLE,
1270        );
1271        let part_0_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
1272            &part_0_dir,
1273            "partition",
1274        )
1275        .expect("Failed to open Partition service");
1276        let part_1_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
1277            &part_1_dir,
1278            "partition",
1279        )
1280        .expect("Failed to open Partition service");
1281
1282        let transaction = runner.create_transaction().await.expect("Failed to create transaction");
1283        part_0_proxy
1284            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
1285                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
1286                type_guid: Some(fblock::Guid { value: [0xffu8; 16] }),
1287                ..Default::default()
1288            })
1289            .await
1290            .expect("FIDL error")
1291            .expect("Failed to update_metadata");
1292        part_1_proxy
1293            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
1294                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
1295                flags: Some(1234),
1296                ..Default::default()
1297            })
1298            .await
1299            .expect("FIDL error")
1300            .expect("Failed to update_metadata");
1301
1302        observer.0.store(true, Ordering::Relaxed); // Fail the next write
1303        runner.commit_transaction(transaction).await.expect_err("Commit transaction should fail");
1304
1305        // Ensure the changes did not get applied.
1306        let part_0_block =
1307            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_0_dir, "volume")
1308                .expect("Failed to open Volume service");
1309        let (status, guid) = part_0_block.get_type_guid().await.expect("FIDL error");
1310        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
1311        assert_eq!(guid.unwrap().value, [2u8; 16]);
1312        let part_1_block =
1313            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_1_dir, "volume")
1314                .expect("Failed to open Volume service");
1315        let metadata =
1316            part_1_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
1317        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
1318        assert_eq!(metadata.flags, Some(0));
1319
1320        runner.shutdown().await;
1321    }
1322
1323    #[fuchsia::test]
    async fn reset_partition_tables() {
        // The test will reset the tables from ["part", "part2"] to
        // ["part3", <empty>, "part4", <125 empty entries>].
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_NAME: &str = "part";
        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
        const PART_2_NAME: &str = "part2";
        const PART_3_NAME: &str = "part3";
        const PART_4_NAME: &str = "part4";

        let (block_device, partitions_dir) = setup(
            512,
            1048576 / 512,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let nil_entry = PartitionInfo {
            label: "".to_string(),
            type_guid: Guid::from_bytes([0u8; 16]),
            instance_guid: Guid::from_bytes([0u8; 16]),
            start_block: 0,
            num_blocks: 0,
            flags: 0,
        };
        let mut new_partitions = vec![nil_entry; 128];
        new_partitions[0] = PartitionInfo {
            label: PART_3_NAME.to_string(),
            type_guid: Guid::from_bytes(PART_TYPE_GUID),
            instance_guid: Guid::from_bytes([1u8; 16]),
            start_block: 64,
            num_blocks: 2,
            flags: 0,
        };
        new_partitions[2] = PartitionInfo {
            label: PART_4_NAME.to_string(),
            type_guid: Guid::from_bytes(PART_TYPE_GUID),
            instance_guid: Guid::from_bytes([2u8; 16]),
            start_block: 66,
            num_blocks: 4,
            flags: 0,
        };
        runner.reset_partition_table(new_partitions).await.expect("reset_partition_table failed");
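        // Entries 0 and 2 were populated, so part-000 and part-002 should appear; the nil
        // entry at slot 1 should not produce a directory entry.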
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").map(|_| ()).expect_err("Extra entry found");
        partitions_dir.get_entry("part-002").expect("No entry found");

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block = connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&proxy, "volume")
            .expect("Failed to open block service");
        let (status, name) = block.get_name().await.expect("FIDL error");
        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
        assert_eq!(name.unwrap(), PART_3_NAME);

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_too_many_partitions() {
        let (block_device, partitions_dir) = setup(512, 8, vec![]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let nil_entry = PartitionInfo {
            label: "".to_string(),
            type_guid: Guid::from_bytes([0u8; 16]),
            instance_guid: Guid::from_bytes([0u8; 16]),
            start_block: 0,
            num_blocks: 0,
            flags: 0,
        };
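        // A full 128-entry table (at the conventional 128 bytes per entry) needs 32 blocks
        // per table copy, which cannot fit on an 8-block device, so the reset should fail.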
        let new_partitions = vec![nil_entry; 128];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_too_large_partitions() {
        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
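        // Partition "b" claims blocks 6..206 on a 64-block device, so validation should
        // reject the table.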
        let new_partitions = vec![
            PartitionInfo {
                label: "a".to_string(),
                type_guid: Guid::from_bytes([1u8; 16]),
                instance_guid: Guid::from_bytes([1u8; 16]),
                start_block: 4,
                num_blocks: 2,
                flags: 0,
            },
            PartitionInfo {
                label: "b".to_string(),
                type_guid: Guid::from_bytes([2u8; 16]),
                instance_guid: Guid::from_bytes([2u8; 16]),
                start_block: 6,
                num_blocks: 200,
                flags: 0,
            },
        ];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_partition_overlaps_metadata() {
        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
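        // Block 0 holds the protective MBR and block 1 the primary GPT header, so a
        // partition starting at block 1 collides with the metadata region.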
        let new_partitions = vec![PartitionInfo {
            label: "a".to_string(),
            type_guid: Guid::from_bytes([1u8; 16]),
            instance_guid: Guid::from_bytes([1u8; 16]),
            start_block: 1,
            num_blocks: 2,
            flags: 0,
        }];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_partitions_overlap() {
        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
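        // "a" occupies blocks 32..34 and "b" starts at block 33, so the ranges intersect.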
        let new_partitions = vec![
            PartitionInfo {
                label: "a".to_string(),
                type_guid: Guid::from_bytes([1u8; 16]),
                instance_guid: Guid::from_bytes([1u8; 16]),
                start_block: 32,
                num_blocks: 2,
                flags: 0,
            },
            PartitionInfo {
                label: "b".to_string(),
                type_guid: Guid::from_bytes([2u8; 16]),
                instance_guid: Guid::from_bytes([2u8; 16]),
                start_block: 33,
                num_blocks: 1,
                flags: 0,
            },
        ];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn add_partition() {
        let (block_device, partitions_dir) = setup(512, 64, vec![PartitionInfo::nil(); 64]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");

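        // Stage the new partition in a transaction; nothing becomes visible until the
        // transaction is committed.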
        let transaction = runner.create_transaction().await.expect("Create transaction failed");
        let request = fpartitions::PartitionsManagerAddPartitionRequest {
            transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
            name: Some("a".to_string()),
            type_guid: Some(fblock::Guid { value: [1u8; 16] }),
            num_blocks: Some(2),
            ..Default::default()
        };
        runner.add_partition(request).await.expect("add_partition failed");
        runner.commit_transaction(transaction).await.expect("commit_transaction failed");

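        // The committed partition should be exported as part-000 with the requested size.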
        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block = connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&proxy, "volume")
            .expect("Failed to open block service");
        let client: RemoteBlockClient =
            RemoteBlockClient::new(block).await.expect("Failed to create block client");

        assert_eq!(client.block_count(), 2);
        assert_eq!(client.block_size(), 512);

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn partition_info() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(16),
                block_size: 512,
                info: DeviceInfo::Block(BlockInfo {
                    max_transfer_blocks: NonZero::new(2),
                    device_flags: BlockDeviceFlag::READONLY
                        | BlockDeviceFlag::REMOVABLE
                        | BlockDeviceFlag::ZSTD_DECOMPRESSION_SUPPORT,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 1,
                flags: 0xabcd,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Volume service");
        let info: fblock::BlockInfo =
            part_block.get_info().await.expect("FIDL error").expect("get_info failed");
        assert_eq!(info.block_count, 1);
        assert_eq!(info.block_size, 512);
        assert_eq!(
            info.flags,
            BlockDeviceFlag::READONLY
                | BlockDeviceFlag::REMOVABLE
                | BlockDeviceFlag::ZSTD_DECOMPRESSION_SUPPORT
        );
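        // max_transfer_blocks (2) is surfaced in bytes: 2 * 512 = 1024.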
        assert_eq!(info.max_transfer_size, 1024);

        let metadata: fblock::BlockGetMetadataResponse =
            part_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
        assert_eq!(metadata.name, Some(PART_NAME.to_string()));
        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
        assert_eq!(metadata.instance_guid.unwrap().value, PART_INSTANCE_GUID);
        assert_eq!(metadata.start_block_offset, Some(4));
        assert_eq!(metadata.num_blocks, Some(1));
        assert_eq!(metadata.flags, Some(0xabcd));

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn nested_gpt() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let vmo = zx::Vmo::create(64 * 512).unwrap();
        let vmo_clone = vmo.create_child(zx::VmoChildOptions::REFERENCE, 0, 0).unwrap();
        let (outer_block_device, outer_partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromVmo(vmo_clone),
                block_size: 512,
                info: DeviceInfo::Block(BlockInfo {
                    device_flags: BlockDeviceFlag::READONLY | BlockDeviceFlag::REMOVABLE,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 16,
                flags: 0xabcd,
            }],
        )
        .await;

        let outer_partitions_dir_clone = outer_partitions_dir.clone();
        let outer_runner =
            GptManager::new(outer_block_device.connect(), outer_partitions_dir_clone)
                .await
                .expect("load should succeed");

        let outer_part_dir = vfs::serve_directory(
            outer_partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&outer_part_dir, "volume")
                .expect("Failed to open Block service");

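        // Format a nested GPT inside the outer partition; its single entry starts at
        // block 5 relative to the partition (absolute block 4 + 5 = 9).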
        let client = Arc::new(RemoteBlockClient::new(part_block.clone()).await.unwrap());
        let _ = gpt::Gpt::format(
            client,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 5,
                num_blocks: 1,
                flags: 0xabcd,
            }],
        )
        .await
        .unwrap();

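        // Layer a second GptManager on the outer partition's Block protocol so it serves
        // the nested table.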
        let partitions_dir = vfs::directory::immutable::simple();
        let partitions_dir_clone = partitions_dir.clone();
        let runner =
            GptManager::new(part_block, partitions_dir_clone).await.expect("load should succeed");
        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let inner_part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Block service");

        let client =
            RemoteBlockClient::new(inner_part_block).await.expect("Failed to create block client");
        assert_eq!(client.block_count(), 1);
        assert_eq!(client.block_size(), 512);

        let buffer = vec![0xaa; 512];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();
        client
            .write_at(BufferSlice::Memory(&buffer), 512)
            .await
            .expect_err("Write past end should fail");
        client.flush().await.unwrap();

        runner.shutdown().await;
        outer_runner.shutdown().await;

        // The write should land at absolute block 9: the outer partition starts at block 4
        // and the inner partition starts at block 5 within it.
        let data = vmo.read_to_vec::<u8>(9 * 512, 512).unwrap();
        assert_eq!(&data[..], &buffer[..]);
    }

    #[fuchsia::test]
    async fn offset_map_does_not_allow_partition_overwrite() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(16),
                block_size: 512,
                info: DeviceInfo::Block(BlockInfo {
                    device_flags: BlockDeviceFlag::READONLY | BlockDeviceFlag::REMOVABLE,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 2,
                flags: 0xabcd,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );

        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Block service");

        // Attempting to open a session with an offset map that extends past the end of the
        // partition should fail.
        let (session, server_end) = fidl::endpoints::create_proxy::<fblock::SessionMarker>();
        part_block
            .open_session_with_offset_map(
                server_end,
                &fblock::BlockOffsetMapping {
                    source_block_offset: 0,
                    target_block_offset: 1,
                    length: 2,
                },
            )
            .expect("FIDL error");
        session.get_fifo().await.expect_err("Session should be closed");

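        // A mapping whose length (3 blocks) exceeds the 2-block partition should likewise
        // be rejected.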
        let (session, server_end) = fidl::endpoints::create_proxy::<fblock::SessionMarker>();
        part_block
            .open_session_with_offset_map(
                server_end,
                &fblock::BlockOffsetMapping {
                    source_block_offset: 0,
                    target_block_offset: 0,
                    length: 3,
                },
            )
            .expect("FIDL error");
        session.get_fifo().await.expect_err("Session should be closed");

        runner.shutdown().await;
    }
}