gpt_component/gpt.rs

// Copyright 2024 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use crate::config::Config;
use crate::partition::PartitionBackend;
use crate::partitions_directory::PartitionsDirectory;
use anyhow::{Context as _, Error, anyhow};
use block_client::{
    BlockClient as _, BufferSlice, MutableBufferSlice, ReadOptions, RemoteBlockClient, VmoId,
    WriteOptions,
};
use block_server::BlockServer;
use block_server::async_interface::SessionManager;

use fidl::endpoints::ServerEnd;
use fuchsia_sync::Mutex;
use futures::stream::TryStreamExt as _;
use std::collections::BTreeMap;
use std::num::NonZero;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Weak};
use zx::AsHandleRef as _;
use {
    fidl_fuchsia_hardware_block as fblock, fidl_fuchsia_storage_partitions as fpartitions,
    fuchsia_async as fasync,
};

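// Typical lifecycle (sketch): a caller hands `GptManager::new` a proxy to the whole block
// device plus a VFS directory to publish into; each partition then appears as a `part-NNN`
// entry in that directory, and the manager must be explicitly shut down before being dropped:
//
//     let manager = GptManager::new(block_proxy, partitions_dir).await?;
//     // ... serve partition clients ...
//     manager.shutdown().await;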
fn partition_directory_entry_name(index: u32) -> String {
    format!("part-{:03}", index)
}

/// A single partition in a GPT device.
pub struct GptPartition {
    gpt: Weak<GptManager>,
    info: Mutex<gpt::PartitionInfo>,
    block_client: Arc<RemoteBlockClient>,
}

fn trace_id(trace_flow_id: Option<NonZero<u64>>) -> u64 {
    trace_flow_id.map(|v| v.get()).unwrap_or_default()
}

impl GptPartition {
    pub fn new(
        gpt: &Arc<GptManager>,
        block_client: Arc<RemoteBlockClient>,
        info: gpt::PartitionInfo,
    ) -> Arc<Self> {
        Arc::new(Self { gpt: Arc::downgrade(gpt), info: Mutex::new(info), block_client })
    }

    pub async fn terminate(&self) {
        if let Err(error) = self.block_client.close().await {
            log::warn!(error:?; "Failed to close block client");
        }
    }

    /// Replaces the partition info, returning its old value.
    pub fn update_info(&self, info: gpt::PartitionInfo) -> gpt::PartitionInfo {
        std::mem::replace(&mut *self.info.lock(), info)
    }

    pub fn block_size(&self) -> u32 {
        self.block_client.block_size()
    }

    pub fn block_count(&self) -> u64 {
        self.info.lock().num_blocks
    }

    pub async fn attach_vmo(&self, vmo: &zx::Vmo) -> Result<VmoId, zx::Status> {
        self.block_client.attach_vmo(vmo).await
    }

    pub async fn detach_vmo(&self, vmoid: VmoId) -> Result<(), zx::Status> {
        self.block_client.detach_vmo(vmoid).await
    }

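    /// Opens a session against the underlying device with an offset map installed, so the
    /// client addresses blocks relative to this partition: block 0 maps to `start_block` on the
    /// device, and requests are bounded by `num_blocks`.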
    pub fn open_passthrough_session(&self, session: ServerEnd<fblock::SessionMarker>) {
        if let Some(gpt) = self.gpt.upgrade() {
            let mapping = {
                let info = self.info.lock();
                fblock::BlockOffsetMapping {
                    source_block_offset: 0,
                    target_block_offset: info.start_block,
                    length: info.num_blocks,
                }
            };
            if let Err(err) = gpt.block_proxy.open_session_with_offset_map(session, &mapping) {
                // Client errors normally come back on `session` but that was already consumed.  The
                // client will get a PEER_CLOSED without an epitaph.
                log::warn!(err:?; "Failed to open passthrough session");
            }
        } else if let Err(err) = session.close_with_epitaph(zx::Status::BAD_STATE) {
            log::warn!(err:?; "Failed to send session epitaph");
        }
    }

    pub fn get_info(&self) -> block_server::DeviceInfo {
        convert_partition_info(
            &*self.info.lock(),
            self.block_client.block_flags(),
            self.block_client.max_transfer_blocks(),
        )
    }

    pub async fn read(
        &self,
        device_block_offset: u64,
        block_count: u32,
        vmo_id: &VmoId,
        vmo_offset: u64, // *bytes* not blocks
        opts: ReadOptions,
        trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        let dev_offset = self
            .absolute_offset(device_block_offset, block_count)
            .map(|offset| offset * self.block_size() as u64)?;
        let buffer = MutableBufferSlice::new_with_vmo_id(
            vmo_id,
            vmo_offset,
            // Widen to u64 before multiplying so large transfers cannot overflow u32.
            block_count as u64 * self.block_size() as u64,
        );
        self.block_client
            .read_at_with_opts_traced(buffer, dev_offset, opts, trace_id(trace_flow_id))
            .await
    }

    pub fn barrier(&self) {
        self.block_client.barrier();
    }

    pub async fn write(
        &self,
        device_block_offset: u64,
        block_count: u32,
        vmo_id: &VmoId,
        vmo_offset: u64, // *bytes* not blocks
        write_opts: WriteOptions,
        trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        let dev_offset = self
            .absolute_offset(device_block_offset, block_count)
            .map(|offset| offset * self.block_size() as u64)?;
        let buffer = BufferSlice::new_with_vmo_id(
            vmo_id,
            vmo_offset,
            // Widen to u64 before multiplying so large transfers cannot overflow u32.
            block_count as u64 * self.block_size() as u64,
        );
        self.block_client
            .write_at_with_opts_traced(buffer, dev_offset, write_opts, trace_id(trace_flow_id))
            .await
    }

    pub async fn flush(&self, trace_flow_id: Option<NonZero<u64>>) -> Result<(), zx::Status> {
        self.block_client.flush_traced(trace_id(trace_flow_id)).await
    }

    pub async fn trim(
        &self,
        device_block_offset: u64,
        block_count: u32,
        trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        let dev_offset = self
            .absolute_offset(device_block_offset, block_count)
            .map(|offset| offset * self.block_size() as u64)?;
        let len = block_count as u64 * self.block_size() as u64;
        self.block_client.trim_traced(dev_offset..dev_offset + len, trace_id(trace_flow_id)).await
    }

    // Converts a relative range specified by [offset, offset+len) into an absolute offset in the
    // GPT device, performing bounds checking within the partition.  Returns ZX_ERR_OUT_OF_RANGE for
    // an invalid offset/len.
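    // For example, a partition with start_block == 4 and num_blocks == 2 occupies absolute
    // blocks [4, 6); a request with offset == 1 and len == 2 would map to [5, 7), overrunning
    // the partition, so it is rejected.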
    fn absolute_offset(&self, mut offset: u64, len: u32) -> Result<u64, zx::Status> {
        let info = self.info.lock();
        offset = offset.checked_add(info.start_block).ok_or(zx::Status::OUT_OF_RANGE)?;
        let end = offset.checked_add(len as u64).ok_or(zx::Status::OUT_OF_RANGE)?;
        if end > info.start_block + info.num_blocks {
            Err(zx::Status::OUT_OF_RANGE)
        } else {
            Ok(offset)
        }
    }
}

fn convert_partition_info(
    info: &gpt::PartitionInfo,
    device_flags: fblock::Flag,
    max_transfer_blocks: Option<NonZero<u32>>,
) -> block_server::DeviceInfo {
    block_server::DeviceInfo::Partition(block_server::PartitionInfo {
        device_flags,
        max_transfer_blocks,
        block_range: Some(info.start_block..info.start_block + info.num_blocks),
        type_guid: info.type_guid.to_bytes(),
        instance_guid: info.instance_guid.to_bytes(),
        name: info.label.clone(),
        flags: info.flags,
    })
}

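/// Returns true if `b` starts exactly where `a` ends, i.e. the two partitions are contiguous on
/// the device and can be exposed as a single merged partition.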
fn can_merge(a: &gpt::PartitionInfo, b: &gpt::PartitionInfo) -> bool {
    a.start_block + a.num_blocks == b.start_block
}

struct PendingTransaction {
    transaction: gpt::Transaction,
    client_koid: zx::Koid,
    // A list of indexes for partitions which were added in the transaction.  When committing, all
    // newly created partitions are published.
    added_partitions: Vec<u32>,
    // A task which waits for the client end to be closed and clears the pending transaction.
    _signal_task: fasync::Task<()>,
}

struct Inner {
    gpt: gpt::Gpt,
    partitions: BTreeMap<u32, Arc<BlockServer<SessionManager<PartitionBackend>>>>,
    // Exposes all partitions for discovery by other components.  Should be kept in sync with
    // `partitions`.
    partitions_dir: PartitionsDirectory,
    pending_transaction: Option<PendingTransaction>,
}

impl Inner {
    /// Ensures that `transaction` matches our pending transaction.
    fn ensure_transaction_matches(&self, transaction: &zx::EventPair) -> Result<(), zx::Status> {
        if let Some(pending) = self.pending_transaction.as_ref() {
            if transaction.get_koid()? == pending.client_koid {
                Ok(())
            } else {
                Err(zx::Status::BAD_HANDLE)
            }
        } else {
            Err(zx::Status::BAD_STATE)
        }
    }

    async fn bind_partition(
        &mut self,
        parent: &Arc<GptManager>,
        index: u32,
        info: gpt::PartitionInfo,
        overlay_indexes: Vec<usize>,
    ) -> Result<(), Error> {
        log::trace!(
            "GPT part {index}{}: {info:?}",
            if !overlay_indexes.is_empty() { " (overlay)" } else { "" }
        );
        info.start_block
            .checked_add(info.num_blocks)
            .ok_or_else(|| anyhow!("Overflow in partition end"))?;
        let partition =
            PartitionBackend::new(GptPartition::new(parent, self.gpt.client().clone(), info));
        let block_server = Arc::new(BlockServer::new(parent.block_size, partition));
        if !overlay_indexes.is_empty() {
            self.partitions_dir.add_overlay(
                &partition_directory_entry_name(index),
                Arc::downgrade(&block_server),
                Arc::downgrade(parent),
                overlay_indexes,
            );
        } else {
            self.partitions_dir.add_partition(
                &partition_directory_entry_name(index),
                Arc::downgrade(&block_server),
                Arc::downgrade(parent),
                index as usize,
            );
        }
        self.partitions.insert(index, block_server);
        Ok(())
    }

    async fn bind_super_and_userdata_partition(
        &mut self,
        parent: &Arc<GptManager>,
        super_partition: (u32, gpt::PartitionInfo),
        userdata_partition: (u32, gpt::PartitionInfo),
    ) -> Result<(), Error> {
        let info = gpt::PartitionInfo {
            label: "super_and_userdata".to_string(),
            type_guid: super_partition.1.type_guid.clone(),
            instance_guid: super_partition.1.instance_guid.clone(),
            start_block: super_partition.1.start_block,
            num_blocks: super_partition.1.num_blocks + userdata_partition.1.num_blocks,
            flags: super_partition.1.flags,
        };
        log::trace!(
            "GPT merged parts {:?} + {:?} -> {info:?}",
            super_partition.1,
            userdata_partition.1
        );
        self.bind_partition(
            parent,
            super_partition.0,
            info,
            vec![super_partition.0 as usize, userdata_partition.0 as usize],
        )
        .await
    }

    async fn bind_all_partitions(&mut self, parent: &Arc<GptManager>) -> Result<(), Error> {
        self.partitions.clear();
        self.partitions_dir.clear();

        let mut partitions = self.gpt.partitions().clone();
        if parent.config.merge_super_and_userdata {
            // Attempt to merge the first `super` and `userdata` we find.  The rest will be treated
            // as regular partitions.
            let super_part = partitions
                .iter()
                .find(|(_, info)| info.label == "super")
                .map(|(index, _)| *index)
                .and_then(|index| partitions.remove_entry(&index));
            let userdata_part = partitions
                .iter()
                .find(|(_, info)| info.label == "userdata")
                .map(|(index, _)| *index)
                .and_then(|index| partitions.remove_entry(&index));
            match (super_part, userdata_part) {
                (Some(super_part), Some(userdata_part)) => {
                    if can_merge(&super_part.1, &userdata_part.1) {
                        self.bind_super_and_userdata_partition(parent, super_part, userdata_part)
                            .await?;
                    } else {
                        log::warn!("super/userdata cannot be merged");
                        self.bind_partition(parent, super_part.0, super_part.1, vec![]).await?;
                        self.bind_partition(parent, userdata_part.0, userdata_part.1, vec![])
                            .await?;
                    }
                }
                (Some((index, info)), None) | (None, Some((index, info))) => {
                    log::warn!("Only one of super/userdata found; not merging");
                    self.bind_partition(parent, index, info, vec![]).await?;
                }
                (None, None) => {}
            }
        }
        for (index, info) in partitions {
            self.bind_partition(parent, index, info, vec![]).await?;
        }
        Ok(())
    }

    fn add_partition(&mut self, info: gpt::PartitionInfo) -> Result<usize, gpt::AddPartitionError> {
        let pending = self.pending_transaction.as_mut().unwrap();
        let idx = self.gpt.add_partition(&mut pending.transaction, info)?;
        pending.added_partitions.push(idx as u32);
        Ok(idx)
    }
}

/// Runs a GPT device.
pub struct GptManager {
    config: Config,
    block_proxy: fblock::BlockProxy,
    block_size: u32,
    block_count: u64,
    inner: futures::lock::Mutex<Inner>,
    shutdown: AtomicBool,
}

impl std::fmt::Debug for GptManager {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
        f.debug_struct("GptManager")
            .field("block_size", &self.block_size)
            .field("block_count", &self.block_count)
            .finish()
    }
}

impl GptManager {
    pub async fn new(
        block_proxy: fblock::BlockProxy,
        partitions_dir: Arc<vfs::directory::immutable::Simple>,
    ) -> Result<Arc<Self>, Error> {
        Self::new_with_config(block_proxy, partitions_dir, Config::default()).await
    }

    pub async fn new_with_config(
        block_proxy: fblock::BlockProxy,
        partitions_dir: Arc<vfs::directory::immutable::Simple>,
        config: Config,
    ) -> Result<Arc<Self>, Error> {
        log::info!("Binding to GPT");
        let client = Arc::new(RemoteBlockClient::new(block_proxy.clone()).await?);
        let block_size = client.block_size();
        let block_count = client.block_count();
        let gpt = gpt::Gpt::open(client).await.context("Failed to load GPT")?;

        let this = Arc::new(Self {
            config,
            block_proxy,
            block_size,
            block_count,
            inner: futures::lock::Mutex::new(Inner {
                gpt,
                partitions: BTreeMap::new(),
                partitions_dir: PartitionsDirectory::new(partitions_dir),
                pending_transaction: None,
            }),
            shutdown: AtomicBool::new(false),
        });
        this.inner.lock().await.bind_all_partitions(&this).await?;
        log::info!("Starting all partitions OK!");
        Ok(this)
    }

    pub fn block_size(&self) -> u32 {
        self.block_size
    }

    pub fn block_count(&self) -> u64 {
        self.block_count
    }

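    /// Creates a transaction for modifying the partition table.  Only one transaction may be
    /// pending at a time; the returned event pair serves as the client's token, and dropping it
    /// aborts the transaction.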
    pub async fn create_transaction(self: &Arc<Self>) -> Result<zx::EventPair, zx::Status> {
        let mut inner = self.inner.lock().await;
        if inner.pending_transaction.is_some() {
            return Err(zx::Status::ALREADY_EXISTS);
        }
        let transaction = inner.gpt.create_transaction().unwrap();
        let (client_end, server_end) = zx::EventPair::create();
        let client_koid = client_end.get_koid()?;
        let signal_waiter = fasync::OnSignals::new(server_end, zx::Signals::EVENTPAIR_PEER_CLOSED);
        let this = self.clone();
        let task = fasync::Task::spawn(async move {
            let _ = signal_waiter.await;
            let mut inner = this.inner.lock().await;
            if inner.pending_transaction.as_ref().is_some_and(|t| t.client_koid == client_koid) {
                inner.pending_transaction = None;
            }
        });
        inner.pending_transaction = Some(PendingTransaction {
            transaction,
            client_koid,
            added_partitions: vec![],
            _signal_task: task,
        });
        Ok(client_end)
    }

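    /// Commits the pending transaction identified by `transaction`, rolling back any partition
    /// metadata that was already updated if the commit fails.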
    pub async fn commit_transaction(
        self: &Arc<Self>,
        transaction: zx::EventPair,
    ) -> Result<(), zx::Status> {
        if let Err((status, old_infos)) = self.commit_transaction_inner(transaction).await {
            let inner = self.inner.lock().await;
            for (idx, info) in old_infos {
                if let Some(part) = inner.partitions.get(&idx) {
                    part.session_manager().interface().update_info(info);
                }
            }
            Err(status)
        } else {
            Ok(())
        }
    }

    // On error, returns a vector of state which was modified that needs to be restored.  Scopeguard
    // doesn't work here because `inner` requires an async lock.
    async fn commit_transaction_inner(
        self: &Arc<Self>,
        transaction: zx::EventPair,
    ) -> Result<(), (zx::Status, Vec<(u32, gpt::PartitionInfo)>)> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(&transaction).map_err(|status| (status, vec![]))?;
        let pending = std::mem::take(&mut inner.pending_transaction).unwrap();
        let mut old_infos = vec![];
        for (info, idx) in pending
            .transaction
            .partitions
            .iter()
            .zip(0u32..)
            .filter(|(info, idx)| !info.is_nil() && !pending.added_partitions.contains(idx))
        {
            let part = inner.partitions.get(&idx).ok_or_else(|| {
                log::warn!("Failed to find part {idx}");
                (zx::Status::BAD_STATE, old_infos.clone())
            })?;
            old_infos.push((idx, part.session_manager().interface().update_info(info.clone())));
        }
        if let Err(err) = inner.gpt.commit_transaction(pending.transaction).await {
            log::warn!(err:?; "Failed to commit transaction");
            return Err((zx::Status::IO, old_infos.clone()));
        }
        for idx in pending.added_partitions {
            let info = inner
                .gpt
                .partitions()
                .get(&idx)
                .ok_or_else(|| (zx::Status::BAD_STATE, old_infos.clone()))?
                .clone();
            inner.bind_partition(self, idx, info, vec![]).await.map_err(|err| {
                log::error!(err:?; "Failed to bind partition");
                (zx::Status::BAD_STATE, old_infos.clone())
            })?;
        }
        Ok(())
    }

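    /// Adds a partition to the pending transaction.  The caller supplies the size and metadata;
    /// the start block is chosen by the allocator (the value passed in here is a placeholder).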
    pub async fn add_partition(
        &self,
        request: fpartitions::PartitionsManagerAddPartitionRequest,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(
            request.transaction.as_ref().ok_or(zx::Status::BAD_HANDLE)?,
        )?;
        let info = gpt::PartitionInfo {
            label: request.name.ok_or(zx::Status::INVALID_ARGS)?,
            type_guid: request
                .type_guid
                .map(|value| gpt::Guid::from_bytes(value.value))
                .ok_or(zx::Status::INVALID_ARGS)?,
            instance_guid: request
                .instance_guid
                .map(|value| gpt::Guid::from_bytes(value.value))
                .unwrap_or_else(|| gpt::Guid::generate()),
            start_block: 0,
            num_blocks: request.num_blocks.ok_or(zx::Status::INVALID_ARGS)?,
            flags: request.flags.unwrap_or_default(),
        };
        let idx = inner.add_partition(info)?;
        let partition =
            inner.pending_transaction.as_ref().unwrap().transaction.partitions.get(idx).unwrap();
        log::info!(
            "Allocated partition {:?} at {:?}",
            partition.label,
            partition.start_block..partition.start_block + partition.num_blocks
        );
        Ok(())
    }

    pub async fn handle_partitions_requests(
        &self,
        gpt_index: usize,
        mut requests: fpartitions::PartitionRequestStream,
    ) -> Result<(), zx::Status> {
        while let Some(request) = requests.try_next().await.unwrap() {
            match request {
                fpartitions::PartitionRequest::UpdateMetadata { payload, responder } => {
                    responder
                        .send(
                            self.update_partition_metadata(gpt_index, payload)
                                .await
                                .map_err(|status| status.into_raw()),
                        )
                        .unwrap_or_else(
                            |err| log::error!(err:?; "Failed to send UpdateMetadata response"),
                        );
                }
            }
        }
        Ok(())
    }

    async fn update_partition_metadata(
        &self,
        gpt_index: usize,
        request: fpartitions::PartitionUpdateMetadataRequest,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(
            request.transaction.as_ref().ok_or(zx::Status::BAD_HANDLE)?,
        )?;

        let transaction = &mut inner.pending_transaction.as_mut().unwrap().transaction;
        let entry = transaction.partitions.get_mut(gpt_index).ok_or(zx::Status::BAD_STATE)?;
        if let Some(type_guid) = request.type_guid {
            entry.type_guid = gpt::Guid::from_bytes(type_guid.value);
        }
        if let Some(flags) = request.flags {
            entry.flags = flags;
        }
        Ok(())
    }

    pub async fn handle_overlay_partitions_requests(
        &self,
        gpt_indexes: Vec<usize>,
        mut requests: fpartitions::OverlayPartitionRequestStream,
    ) -> Result<(), zx::Status> {
        while let Some(request) = requests.try_next().await.unwrap() {
            match request {
                fpartitions::OverlayPartitionRequest::GetPartitions { responder } => {
                    match self.get_overlay_partition_info(&gpt_indexes[..]).await {
                        Ok(partitions) => responder.send(Ok(&partitions[..])),
                        Err(status) => responder.send(Err(status.into_raw())),
                    }
                    .unwrap_or_else(
                        |err| log::error!(err:?; "Failed to send GetPartitions response"),
                    );
                }
            }
        }
        Ok(())
    }

    async fn get_overlay_partition_info(
        &self,
        gpt_indexes: &[usize],
    ) -> Result<Vec<fpartitions::PartitionInfo>, zx::Status> {
        fn convert_partition_info(info: &gpt::PartitionInfo) -> fpartitions::PartitionInfo {
            fpartitions::PartitionInfo {
                name: info.label.to_string(),
                type_guid: fidl_fuchsia_hardware_block_partition::Guid {
                    value: info.type_guid.to_bytes(),
                },
                instance_guid: fidl_fuchsia_hardware_block_partition::Guid {
                    value: info.instance_guid.to_bytes(),
                },
                start_block: info.start_block,
                num_blocks: info.num_blocks,
                flags: info.flags,
            }
        }

        let inner = self.inner.lock().await;
        let mut partitions = vec![];
        for index in gpt_indexes {
            let index: u32 = *index as u32;
            partitions.push(
                inner
                    .gpt
                    .partitions()
                    .get(&index)
                    .map(convert_partition_info)
                    .ok_or(zx::Status::BAD_STATE)?,
            );
        }
        Ok(partitions)
    }

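    /// Replaces the entire partition table with `partitions`, destroying all previous entries,
    /// then rebinds every partition.  Fails if a transaction is pending.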
    pub async fn reset_partition_table(
        self: &Arc<Self>,
        partitions: Vec<gpt::PartitionInfo>,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        if inner.pending_transaction.is_some() {
            return Err(zx::Status::BAD_STATE);
        }

        log::info!("Resetting gpt.  Expect data loss!!!");
        let mut transaction = inner.gpt.create_transaction().unwrap();
        transaction.partitions = partitions;
        inner.gpt.commit_transaction(transaction).await?;

        if let Err(err) = inner.bind_all_partitions(self).await {
            log::error!(err:?; "Failed to rebind partitions");
            return Err(zx::Status::BAD_STATE);
        }
        log::info!("Rebinding partitions OK!");
        Ok(())
    }

    pub async fn shutdown(self: Arc<Self>) {
        log::info!("Shutting down gpt");
        let mut inner = self.inner.lock().await;
        inner.partitions_dir.clear();
        inner.partitions.clear();
        self.shutdown.store(true, Ordering::Relaxed);
        log::info!("Shutting down gpt OK");
    }
}

impl Drop for GptManager {
    fn drop(&mut self) {
        assert!(self.shutdown.load(Ordering::Relaxed), "Did you forget to shutdown?");
    }
}

#[cfg(test)]
mod tests {
    use super::GptManager;
    use block_client::{
        BlockClient as _, BufferSlice, MutableBufferSlice, RemoteBlockClient, WriteFlags,
    };
    use block_server::{BlockInfo, DeviceInfo, WriteOptions};
    use fidl::HandleBased as _;
    use fuchsia_component::client::connect_to_named_protocol_at_dir_root;
    use gpt::{Gpt, Guid, PartitionInfo};
    use std::num::NonZero;
    use std::sync::Arc;
    use std::sync::atomic::{AtomicBool, Ordering};
    use vmo_backed_block_server::{
        InitialContents, VmoBackedServer, VmoBackedServerOptions, VmoBackedServerTestingExt as _,
    };
    use {
        fidl_fuchsia_hardware_block as fblock, fidl_fuchsia_hardware_block_volume as fvolume,
        fidl_fuchsia_io as fio, fidl_fuchsia_storage_partitions as fpartitions,
        fuchsia_async as fasync,
    };

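    // Creates a VMO-backed fake block device formatted with the given GPT partitions, plus an
    // empty VFS directory for the GptManager under test to publish partitions into.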
    async fn setup(
        block_size: u32,
        block_count: u64,
        partitions: Vec<PartitionInfo>,
    ) -> (Arc<VmoBackedServer>, Arc<vfs::directory::immutable::Simple>) {
        setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(block_count),
                block_size,
                ..Default::default()
            },
            partitions,
        )
        .await
    }

    async fn setup_with_options(
        opts: VmoBackedServerOptions<'_>,
        partitions: Vec<PartitionInfo>,
    ) -> (Arc<VmoBackedServer>, Arc<vfs::directory::immutable::Simple>) {
        let server = Arc::new(opts.build().unwrap());
        {
            let (block_client, block_server) =
                fidl::endpoints::create_proxy::<fblock::BlockMarker>();
            let volume_stream = fidl::endpoints::ServerEnd::<fvolume::VolumeMarker>::from(
                block_server.into_channel(),
            )
            .into_stream();
            let server_clone = server.clone();
            let _task = fasync::Task::spawn(async move { server_clone.serve(volume_stream).await });
            let client = Arc::new(RemoteBlockClient::new(block_client).await.unwrap());
            Gpt::format(client, partitions).await.unwrap();
        }
        (server, vfs::directory::immutable::simple())
    }

    #[fuchsia::test]
    async fn load_unformatted_gpt() {
        let vmo = zx::Vmo::create(4096).unwrap();
        let server = Arc::new(VmoBackedServer::from_vmo(512, vmo));

        GptManager::new(server.connect(), vfs::directory::immutable::simple())
            .await
            .expect_err("load should fail");
    }

    #[fuchsia::test]
    async fn load_formatted_empty_gpt() {
        let (block_device, partitions_dir) = setup(512, 8, vec![]).await;

        let runner = GptManager::new(block_device.connect(), partitions_dir)
            .await
            .expect("load should succeed");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_one_partition() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").map(|_| ()).expect_err("Extra entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_two_partitions() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        partitions_dir.get_entry("part-002").map(|_| ()).expect_err("Extra entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn partition_io() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 2,
                flags: 0,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        assert_eq!(client.block_count(), 2);
        assert_eq!(client.block_size(), 512);

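        // The partition is 2 blocks (1024 bytes), so byte offset 1024 is just past its end; and
        // because it starts at device block 4, partition offset 0 lands at device byte 2048.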
        let buf = vec![0xabu8; 512];
        client.write_at(BufferSlice::Memory(&buf[..]), 0).await.expect("write_at failed");
        client
            .write_at(BufferSlice::Memory(&buf[..]), 1024)
            .await
            .expect_err("write_at should fail when writing past partition end");
        let mut buf2 = vec![0u8; 512];
        client.read_at(MutableBufferSlice::Memory(&mut buf2[..]), 0).await.expect("read_at failed");
        assert_eq!(buf, buf2);
        client
            .read_at(MutableBufferSlice::Memory(&mut buf2[..]), 1024)
            .await
            .expect_err("read_at should fail when reading past partition end");
        client.trim(512..1024).await.expect("trim failed");
        client.trim(1..512).await.expect_err("trim with invalid range should fail");
        client.trim(1024..1536).await.expect_err("trim past end of partition should fail");
        runner.shutdown().await;

        // Ensure writes persisted to the partition.
        let mut buf = vec![0u8; 512];
        let client =
            RemoteBlockClient::new(block_device.connect::<fblock::BlockProxy>()).await.unwrap();
        client.read_at(MutableBufferSlice::Memory(&mut buf[..]), 2048).await.unwrap();
        assert_eq!(&buf[..], &[0xabu8; 512]);
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_invalid_primary_header() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        {
            let (client, stream) =
                fidl::endpoints::create_proxy_and_stream::<fvolume::VolumeMarker>();
            let server = block_device.clone();
            let _task = fasync::Task::spawn(async move { server.serve(stream).await });
            let client = RemoteBlockClient::new(client).await.unwrap();
            client.write_at(BufferSlice::Memory(&[0xffu8; 512]), 512).await.unwrap();
        }

        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_invalid_primary_partition_table() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        {
            let (client, stream) =
                fidl::endpoints::create_proxy_and_stream::<fvolume::VolumeMarker>();
            let server = block_device.clone();
            let _task = fasync::Task::spawn(async move { server.serve(stream).await });
            let client = RemoteBlockClient::new(client).await.unwrap();
            client.write_at(BufferSlice::Memory(&[0xffu8; 512]), 1024).await.unwrap();
        }

        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn force_access_passed_through() {
        const BLOCK_SIZE: u32 = 512;
        const BLOCK_COUNT: u64 = 1024;

        struct Observer(Arc<AtomicBool>);

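        // Asserts that each write carries FORCE_ACCESS exactly when the test expects it.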
        impl vmo_backed_block_server::Observer for Observer {
            fn write(
                &self,
                _device_block_offset: u64,
                _block_count: u32,
                _vmo: &Arc<zx::Vmo>,
                _vmo_offset: u64,
                opts: WriteOptions,
            ) -> vmo_backed_block_server::WriteAction {
                assert_eq!(
                    opts.flags.contains(WriteFlags::FORCE_ACCESS),
                    self.0.load(Ordering::Relaxed)
                );
                vmo_backed_block_server::WriteAction::Write
            }
        }

        let expect_force_access = Arc::new(AtomicBool::new(false));
        let (server, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(BLOCK_COUNT),
                block_size: BLOCK_SIZE,
                observer: Some(Box::new(Observer(expect_force_access.clone()))),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: "foo".to_string(),
                type_guid: Guid::from_bytes([1; 16]),
                instance_guid: Guid::from_bytes([2; 16]),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let manager = GptManager::new(server.connect(), partitions_dir.clone()).await.unwrap();

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        let buffer = vec![0; BLOCK_SIZE as usize];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();

        expect_force_access.store(true, Ordering::Relaxed);

        client
            .write_at_with_opts(
                BufferSlice::Memory(&buffer),
                0,
                WriteOptions { flags: WriteFlags::FORCE_ACCESS, ..Default::default() },
            )
            .await
            .unwrap();

        manager.shutdown().await;
    }

    #[fuchsia::test]
    async fn barrier_passed_through() {
        const BLOCK_SIZE: u32 = 512;
        const BLOCK_COUNT: u64 = 1024;

        struct Observer(Arc<AtomicBool>);

        impl vmo_backed_block_server::Observer for Observer {
            fn barrier(&self) {
                self.0.store(true, Ordering::Relaxed);
            }
        }

        let expect_barrier = Arc::new(AtomicBool::new(false));
        let (server, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(BLOCK_COUNT),
                block_size: BLOCK_SIZE,
                observer: Some(Box::new(Observer(expect_barrier.clone()))),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: "foo".to_string(),
                type_guid: Guid::from_bytes([1; 16]),
                instance_guid: Guid::from_bytes([2; 16]),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let manager = GptManager::new(server.connect(), partitions_dir.clone()).await.unwrap();

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        let buffer = vec![0; BLOCK_SIZE as usize];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();

        client.barrier();
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();

        assert!(expect_barrier.load(Ordering::Relaxed));

        manager.shutdown().await;
    }

    #[fuchsia::test]
    async fn commit_transaction() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_NAME: &str = "part";
        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            16,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");

        let part_0_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_1_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::Path::validate_and_split("part-001").unwrap(),
            fio::PERM_READABLE,
        );
        let part_0_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_0_dir,
            "partition",
        )
        .expect("Failed to open Partition service");
        let part_1_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_1_dir,
            "partition",
        )
        .expect("Failed to open Partition service");

        let transaction = runner.create_transaction().await.expect("Failed to create transaction");
        part_0_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                type_guid: Some(fidl_fuchsia_hardware_block_partition::Guid {
                    value: [0xffu8; 16],
                }),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");
        part_1_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                flags: Some(1234),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");
        runner.commit_transaction(transaction).await.expect("Failed to commit transaction");

        // Ensure the changes have propagated to the correct partitions.
        let part_0_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_0_dir, "volume")
                .expect("Failed to open Volume service");
        let (status, guid) = part_0_block.get_type_guid().await.expect("FIDL error");
        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
        assert_eq!(guid.unwrap().value, [0xffu8; 16]);
        let part_1_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_1_dir, "volume")
                .expect("Failed to open Volume service");
        let metadata =
            part_1_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
        assert_eq!(metadata.flags, Some(1234));

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn commit_transaction_with_io_error() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_NAME: &str = "part";
        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
        const PART_2_NAME: &str = "part2";

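        // Fails every write once the flag is set, simulating an I/O error during commit.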
        #[derive(Clone)]
        struct Observer(Arc<AtomicBool>);
        impl vmo_backed_block_server::Observer for Observer {
            fn write(
                &self,
                _device_block_offset: u64,
                _block_count: u32,
                _vmo: &Arc<zx::Vmo>,
                _vmo_offset: u64,
                _opts: WriteOptions,
            ) -> vmo_backed_block_server::WriteAction {
                if self.0.load(Ordering::Relaxed) {
                    vmo_backed_block_server::WriteAction::Fail
                } else {
                    vmo_backed_block_server::WriteAction::Write
                }
            }
        }
        let observer = Observer(Arc::new(AtomicBool::new(false)));
        let (block_device, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(16),
                block_size: 512,
                observer: Some(Box::new(observer.clone())),
                ..Default::default()
            },
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");

        let part_0_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_1_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::Path::validate_and_split("part-001").unwrap(),
            fio::PERM_READABLE,
        );
        let part_0_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_0_dir,
            "partition",
        )
        .expect("Failed to open Partition service");
        let part_1_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_1_dir,
            "partition",
        )
        .expect("Failed to open Partition service");

        let transaction = runner.create_transaction().await.expect("Failed to create transaction");
        part_0_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                type_guid: Some(fidl_fuchsia_hardware_block_partition::Guid {
                    value: [0xffu8; 16],
                }),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");
        part_1_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                flags: Some(1234),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");

        observer.0.store(true, Ordering::Relaxed); // Fail the next write
        runner.commit_transaction(transaction).await.expect_err("Commit transaction should fail");

        // Ensure the changes did not get applied.
        let part_0_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_0_dir, "volume")
                .expect("Failed to open Volume service");
        let (status, guid) = part_0_block.get_type_guid().await.expect("FIDL error");
        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
        assert_eq!(guid.unwrap().value, PART_TYPE_GUID);
        let part_1_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_1_dir, "volume")
                .expect("Failed to open Volume service");
        let metadata =
            part_1_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
        assert_eq!(metadata.flags, Some(0));

        runner.shutdown().await;
    }

1337    #[fuchsia::test]
1338    async fn reset_partition_tables() {
1339        // The test will reset the tables from ["part", "part2"] to
1340        // ["part3", <empty>, "part4", <125 empty entries>].
1341        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
1342        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
1343        const PART_1_NAME: &str = "part";
1344        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
1345        const PART_2_NAME: &str = "part2";
1346        const PART_3_NAME: &str = "part3";
1347        const PART_4_NAME: &str = "part4";
1348
1349        let (block_device, partitions_dir) = setup(
1350            512,
1351            1048576 / 512,
1352            vec![
1353                PartitionInfo {
1354                    label: PART_1_NAME.to_string(),
1355                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
1356                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
1357                    start_block: 4,
1358                    num_blocks: 1,
1359                    flags: 0,
1360                },
1361                PartitionInfo {
1362                    label: PART_2_NAME.to_string(),
1363                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
1364                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
1365                    start_block: 5,
1366                    num_blocks: 1,
1367                    flags: 0,
1368                },
1369            ],
1370        )
1371        .await;
1372        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
1373            .await
1374            .expect("load should succeed");
1375        let nil_entry = PartitionInfo {
1376            label: "".to_string(),
1377            type_guid: Guid::from_bytes([0u8; 16]),
1378            instance_guid: Guid::from_bytes([0u8; 16]),
1379            start_block: 0,
1380            num_blocks: 0,
1381            flags: 0,
1382        };
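        // A standard GPT holds 128 partition entries; unused slots stay zeroed (the nil entry),
        // with the two real partitions in slots 0 and 2.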
        let mut new_partitions = vec![nil_entry; 128];
        new_partitions[0] = PartitionInfo {
            label: PART_3_NAME.to_string(),
            type_guid: Guid::from_bytes(PART_TYPE_GUID),
            instance_guid: Guid::from_bytes([1u8; 16]),
            start_block: 64,
            num_blocks: 2,
            flags: 0,
        };
        new_partitions[2] = PartitionInfo {
            label: PART_4_NAME.to_string(),
            type_guid: Guid::from_bytes(PART_TYPE_GUID),
            instance_guid: Guid::from_bytes([2u8; 16]),
            start_block: 66,
            num_blocks: 4,
            flags: 0,
        };
        runner.reset_partition_table(new_partitions).await.expect("reset_partition_table failed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").map(|_| ()).expect_err("Extra entry found");
        partitions_dir.get_entry("part-002").expect("No entry found");

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open Volume service");
        let (status, name) = block.get_name().await.expect("FIDL error");
        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
        assert_eq!(name.unwrap(), PART_3_NAME);

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_too_many_partitions() {
        let (block_device, partitions_dir) = setup(512, 8, vec![]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let nil_entry = PartitionInfo {
            label: "".to_string(),
            type_guid: Guid::from_bytes([0u8; 16]),
            instance_guid: Guid::from_bytes([0u8; 16]),
            start_block: 0,
            num_blocks: 0,
            flags: 0,
        };
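        // Each copy of a full 128-entry table needs 128 * 128 bytes of entries (32 blocks at
        // 512 bytes) plus a header block; two copies cannot possibly fit on this 8-block device,
        // so the reset should presumably fail the size check.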
        let new_partitions = vec![nil_entry; 128];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_too_large_partitions() {
        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let new_partitions = vec![
            PartitionInfo {
                label: "a".to_string(),
                type_guid: Guid::from_bytes([1u8; 16]),
                instance_guid: Guid::from_bytes([1u8; 16]),
                start_block: 4,
                num_blocks: 2,
                flags: 0,
            },
            PartitionInfo {
                label: "b".to_string(),
                type_guid: Guid::from_bytes([2u8; 16]),
                instance_guid: Guid::from_bytes([2u8; 16]),
                start_block: 6,
                num_blocks: 200,
                flags: 0,
            },
        ];
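        // Partition "b" would cover blocks 6..206, extending well past the 64-block device.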
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_partition_overlaps_metadata() {
        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let new_partitions = vec![PartitionInfo {
            label: "a".to_string(),
            type_guid: Guid::from_bytes([1u8; 16]),
            instance_guid: Guid::from_bytes([1u8; 16]),
            start_block: 1,
            num_blocks: 2,
            flags: 0,
        }];
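        // Block 0 holds the protective MBR and block 1 the primary GPT header, so a partition
        // starting at block 1 would overwrite the metadata itself.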
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_partitions_overlap() {
        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let new_partitions = vec![
            PartitionInfo {
                label: "a".to_string(),
                type_guid: Guid::from_bytes([1u8; 16]),
                instance_guid: Guid::from_bytes([1u8; 16]),
                start_block: 32,
                num_blocks: 2,
                flags: 0,
            },
            PartitionInfo {
                label: "b".to_string(),
                type_guid: Guid::from_bytes([2u8; 16]),
                instance_guid: Guid::from_bytes([2u8; 16]),
                start_block: 33,
                num_blocks: 1,
                flags: 0,
            },
        ];
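        // "a" occupies blocks 32 and 33 while "b" also claims block 33, so the ranges collide.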
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn add_partition() {
        let (block_device, partitions_dir) = setup(512, 64, vec![PartitionInfo::nil(); 64]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");

        let transaction = runner.create_transaction().await.expect("Create transaction failed");
        let request = fpartitions::PartitionsManagerAddPartitionRequest {
            transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
            name: Some("a".to_string()),
            type_guid: Some(fidl_fuchsia_hardware_block_partition::Guid { value: [1u8; 16] }),
            num_blocks: Some(2),
            ..Default::default()
        };
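        // The request gives a name, type, and size but no placement; the manager presumably
        // chooses a free range when the transaction is committed.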
        runner.add_partition(request).await.expect("add_partition failed");
        runner.commit_transaction(transaction).await.expect("commit_transaction failed");

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open Volume service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        assert_eq!(client.block_count(), 2);
        assert_eq!(client.block_size(), 512);

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn partition_info() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(16),
                block_size: 512,
                info: DeviceInfo::Block(BlockInfo {
                    max_transfer_blocks: NonZero::new(2),
                    device_flags: fblock::Flag::READONLY | fblock::Flag::REMOVABLE,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 1,
                flags: 0xabcd,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_dir, "volume")
                .expect("Failed to open Volume service");
        let info = part_block.get_info().await.expect("FIDL error").expect("get_info failed");
        assert_eq!(info.block_count, 1);
        assert_eq!(info.block_size, 512);
        assert_eq!(info.flags, fblock::Flag::READONLY | fblock::Flag::REMOVABLE);
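        // The device's 2-block max_transfer_blocks limit surfaces as 2 * 512 = 1024 bytes.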
        assert_eq!(info.max_transfer_size, 1024);

        let metadata =
            part_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
        assert_eq!(metadata.name, Some(PART_NAME.to_string()));
        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
        assert_eq!(metadata.instance_guid.unwrap().value, PART_INSTANCE_GUID);
        assert_eq!(metadata.start_block_offset, Some(4));
        assert_eq!(metadata.num_blocks, Some(1));
        assert_eq!(metadata.flags, Some(0xabcd));

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn nested_gpt() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

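        // Create the backing VMO directly so its contents can be inspected at the end of the
        // test; a REFERENCE child shares the parent's pages, so writes made through the block
        // server are visible when reading `vmo`.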
        let vmo = zx::Vmo::create(64 * 512).unwrap();
        let vmo_clone = vmo.create_child(zx::VmoChildOptions::REFERENCE, 0, 0).unwrap();
        let (outer_block_device, outer_partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromVmo(vmo_clone),
                block_size: 512,
                info: DeviceInfo::Block(BlockInfo {
                    device_flags: fblock::Flag::READONLY | fblock::Flag::REMOVABLE,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 16,
                flags: 0xabcd,
            }],
        )
        .await;

        let outer_partitions_dir_clone = outer_partitions_dir.clone();
        let outer_runner =
            GptManager::new(outer_block_device.connect(), outer_partitions_dir_clone)
                .await
                .expect("load should succeed");

        let outer_part_dir = vfs::serve_directory(
            outer_partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&outer_part_dir, "volume")
                .expect("Failed to open Block service");

        let client = Arc::new(RemoteBlockClient::new(part_block.clone()).await.unwrap());
        let _ = gpt::Gpt::format(
            client,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 5,
                num_blocks: 1,
                flags: 0xabcd,
            }],
        )
        .await
        .unwrap();

        let partitions_dir = vfs::directory::immutable::simple();
        let partitions_dir_clone = partitions_dir.clone();
        let runner =
            GptManager::new(part_block, partitions_dir_clone).await.expect("load should succeed");
        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let inner_part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Block service");

        let client =
            RemoteBlockClient::new(inner_part_block).await.expect("Failed to create block client");
        assert_eq!(client.block_count(), 1);
        assert_eq!(client.block_size(), 512);

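        // Writing at offset 0 of the inner partition lands at block 5 of the outer partition
        // (where the inner GPT placed it), which is absolute block 4 + 5 = 9 on the device.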
        let buffer = vec![0xaa; 512];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();
        client
            .write_at(BufferSlice::Memory(&buffer), 512)
            .await
            .expect_err("Write past end should fail");
        client.flush().await.unwrap();

        runner.shutdown().await;
        outer_runner.shutdown().await;

        // Check that the write targeted the correct block (4 + 5 = 9)
        let data = vmo.read_to_vec(9 * 512, 512).unwrap();
        assert_eq!(&data[..], &buffer[..]);
    }

    #[fuchsia::test]
    async fn offset_map_does_not_allow_partition_overwrite() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(16),
                block_size: 512,
                info: DeviceInfo::Block(BlockInfo {
                    device_flags: fblock::Flag::READONLY | fblock::Flag::REMOVABLE,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 2,
                flags: 0xabcd,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );

        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Block service");

        // Attempting to open a session with an offset map that extends past the end of the
        // partition should fail.
        let (session, server_end) = fidl::endpoints::create_proxy::<fblock::SessionMarker>();
        part_block
            .open_session_with_offset_map(
                server_end,
                &fblock::BlockOffsetMapping {
                    source_block_offset: 0,
                    target_block_offset: 1,
                    length: 2,
                },
            )
            .expect("FIDL error");
        session.get_fifo().await.expect_err("Session should be closed");

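        // A mapping with a valid offset but length 3 also reaches past the end of the 2-block
        // partition and should likewise be rejected.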
        let (session, server_end) = fidl::endpoints::create_proxy::<fblock::SessionMarker>();
        part_block
            .open_session_with_offset_map(
                server_end,
                &fblock::BlockOffsetMapping {
                    source_block_offset: 0,
                    target_block_offset: 0,
                    length: 3,
                },
            )
            .expect("FIDL error");
        session.get_fifo().await.expect_err("Session should be closed");

        runner.shutdown().await;
    }
}