gpt_component/gpt.rs

// Copyright 2024 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use crate::partition::PartitionBackend;
use crate::partitions_directory::PartitionsDirectory;
use anyhow::{anyhow, Context as _, Error};
use block_client::{
    BlockClient as _, BufferSlice, MutableBufferSlice, RemoteBlockClient, VmoId, WriteOptions,
};
use block_server::async_interface::SessionManager;
use block_server::BlockServer;

use fidl::endpoints::ServerEnd;
use futures::lock::Mutex;
use futures::stream::TryStreamExt as _;
use std::collections::BTreeMap;
use std::num::NonZero;
use std::ops::Range;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Weak};
use zx::AsHandleRef as _;
use {
    fidl_fuchsia_hardware_block as fblock, fidl_fuchsia_storage_partitions as fpartitions,
    fuchsia_async as fasync,
};

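/// Formats the directory entry name for the partition at `index`, zero-padded to three digits;
/// e.g. `partition_directory_entry_name(1)` returns "part-001".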
fn partition_directory_entry_name(index: u32) -> String {
    format!("part-{:03}", index)
}

/// A single partition in a GPT device.
pub struct GptPartition {
    gpt: Weak<GptManager>,
    block_client: Arc<RemoteBlockClient>,
    block_range: Range<u64>,
    index: u32,
}

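/// Unwraps an optional trace flow id into the raw value expected by the traced block operations
/// below; `None` becomes 0 (e.g. `trace_id(None) == 0`).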
fn trace_id(trace_flow_id: Option<NonZero<u64>>) -> u64 {
    trace_flow_id.map(|v| v.get()).unwrap_or_default()
}

impl GptPartition {
    pub fn new(
        gpt: &Arc<GptManager>,
        block_client: Arc<RemoteBlockClient>,
        index: u32,
        block_range: Range<u64>,
    ) -> Arc<Self> {
        debug_assert!(block_range.end >= block_range.start);
        Arc::new(Self { gpt: Arc::downgrade(gpt), block_client, block_range, index })
    }

    pub async fn terminate(&self) {
        if let Err(error) = self.block_client.close().await {
            log::warn!(error:?; "Failed to close block client");
        }
    }

    pub fn index(&self) -> u32 {
        self.index
    }

    pub fn block_size(&self) -> u32 {
        self.block_client.block_size()
    }

    pub fn block_count(&self) -> u64 {
        self.block_range.end - self.block_range.start
    }

    pub async fn attach_vmo(&self, vmo: &zx::Vmo) -> Result<VmoId, zx::Status> {
        self.block_client.attach_vmo(vmo).await
    }

    pub async fn detach_vmo(&self, vmoid: VmoId) -> Result<(), zx::Status> {
        self.block_client.detach_vmo(vmoid).await
    }

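    /// Opens a session on the underlying device which translates partition-relative block offsets
    /// into absolute device offsets.  For example, for a partition spanning device blocks 4..6,
    /// source block 0 maps to device block 4.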
    pub fn open_passthrough_session(&self, session: ServerEnd<fblock::SessionMarker>) {
        if let Some(gpt) = self.gpt.upgrade() {
            let mappings = [fblock::BlockOffsetMapping {
                source_block_offset: 0,
                target_block_offset: self.block_range.start,
                length: self.block_count(),
            }];
            if let Err(err) =
                gpt.block_proxy.open_session_with_offset_map(session, None, Some(&mappings[..]))
            {
                // Client errors normally come back on `session` but that was already consumed.  The
                // client will get a PEER_CLOSED without an epitaph.
                log::warn!(err:?; "Failed to open passthrough session");
            }
        } else {
            if let Err(err) = session.close_with_epitaph(zx::Status::BAD_STATE) {
                log::warn!(err:?; "Failed to send session epitaph");
            }
        }
    }

    pub async fn get_info(&self) -> Result<block_server::DeviceInfo, zx::Status> {
        if let Some(gpt) = self.gpt.upgrade() {
            gpt.inner
                .lock()
                .await
                .gpt
                .partitions()
                .get(&self.index)
                .map(|info| convert_partition_info(info, self.block_client.block_flags()))
                .ok_or(zx::Status::BAD_STATE)
        } else {
            Err(zx::Status::BAD_STATE)
        }
    }

    pub async fn read(
        &self,
        device_block_offset: u64,
        block_count: u32,
        vmo_id: &VmoId,
        vmo_offset: u64, // *bytes* not blocks
        trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        let dev_offset = self
            .absolute_offset(device_block_offset, block_count)
            .map(|offset| offset * self.block_size() as u64)?;
        let buffer = MutableBufferSlice::new_with_vmo_id(
            vmo_id,
            vmo_offset,
            block_count as u64 * self.block_size() as u64,
        );
        self.block_client.read_at_traced(buffer, dev_offset, trace_id(trace_flow_id)).await
    }

    pub async fn write(
        &self,
        device_block_offset: u64,
        block_count: u32,
        vmo_id: &VmoId,
        vmo_offset: u64, // *bytes* not blocks
        opts: WriteOptions,
        trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        let dev_offset = self
            .absolute_offset(device_block_offset, block_count)
            .map(|offset| offset * self.block_size() as u64)?;
        let buffer = BufferSlice::new_with_vmo_id(
            vmo_id,
            vmo_offset,
            block_count as u64 * self.block_size() as u64,
        );
        self.block_client
            .write_at_with_opts_traced(buffer, dev_offset, opts, trace_id(trace_flow_id))
            .await
    }

    pub async fn flush(&self, trace_flow_id: Option<NonZero<u64>>) -> Result<(), zx::Status> {
        self.block_client.flush_traced(trace_id(trace_flow_id)).await
    }

    pub async fn trim(
        &self,
        device_block_offset: u64,
        block_count: u32,
        trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        let dev_offset = self
            .absolute_offset(device_block_offset, block_count)
            .map(|offset| offset * self.block_size() as u64)?;
        let len = block_count as u64 * self.block_size() as u64;
        self.block_client.trim_traced(dev_offset..dev_offset + len, trace_id(trace_flow_id)).await
    }

    // Converts a relative range specified by [offset, offset+len) into an absolute offset in the
    // GPT device, performing bounds checking within the partition.  Returns ZX_ERR_OUT_OF_RANGE for
    // an invalid offset/len.
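    // For example, with `block_range` 100..200: `absolute_offset(10, 20)` returns Ok(110),
    // covering blocks 110..130, while `absolute_offset(90, 20)` would end at block 210, past the
    // end of the partition, and so returns OUT_OF_RANGE.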
    fn absolute_offset(&self, mut offset: u64, len: u32) -> Result<u64, zx::Status> {
        offset = offset.checked_add(self.block_range.start).ok_or(zx::Status::OUT_OF_RANGE)?;
        let end = offset.checked_add(len as u64).ok_or(zx::Status::OUT_OF_RANGE)?;
        if end > self.block_range.end {
            Err(zx::Status::OUT_OF_RANGE)
        } else {
            Ok(offset)
        }
    }
}

fn convert_partition_info(
    info: &gpt::PartitionInfo,
    device_flags: fblock::Flag,
) -> block_server::DeviceInfo {
    block_server::DeviceInfo::Partition(block_server::PartitionInfo {
        device_flags,
        block_range: Some(info.start_block..info.start_block + info.num_blocks),
        type_guid: info.type_guid.to_bytes(),
        instance_guid: info.instance_guid.to_bytes(),
        name: info.label.clone(),
        flags: info.flags,
    })
}

struct PendingTransaction {
    transaction: gpt::Transaction,
    client_koid: zx::Koid,
    // A list of indexes for partitions which were added in the transaction.  When committing, all
    // newly created partitions are published.
    added_partitions: Vec<u32>,
    // A task which waits for the client end to be closed and clears the pending transaction.
    _signal_task: fasync::Task<()>,
}

struct Inner {
    gpt: gpt::Gpt,
    partitions: BTreeMap<u32, Arc<BlockServer<SessionManager<PartitionBackend>>>>,
    // Exposes all partitions for discovery by other components.  Should be kept in sync with
    // `partitions`.
    partitions_dir: PartitionsDirectory,
    pending_transaction: Option<PendingTransaction>,
}

impl Inner {
    /// Ensures that `transaction` matches our pending transaction.
    fn ensure_transaction_matches(&self, transaction: &zx::EventPair) -> Result<(), zx::Status> {
        if let Some(pending) = self.pending_transaction.as_ref() {
            if transaction.get_koid()? == pending.client_koid {
                Ok(())
            } else {
                Err(zx::Status::BAD_HANDLE)
            }
        } else {
            Err(zx::Status::BAD_STATE)
        }
    }

    async fn bind_partition(
        &mut self,
        parent: &Arc<GptManager>,
        index: u32,
        info: gpt::PartitionInfo,
    ) -> Result<(), Error> {
        log::info!("GPT part {index}: {info:?}");
        let partition = PartitionBackend::new(GptPartition::new(
            parent,
            self.gpt.client().clone(),
            index,
            info.start_block
                ..info
                    .start_block
                    .checked_add(info.num_blocks)
                    .ok_or_else(|| anyhow!("Overflow in partition range"))?,
        ));
        let block_server = Arc::new(BlockServer::new(parent.block_size, partition));
        self.partitions_dir.add_entry(
            &partition_directory_entry_name(index),
            Arc::downgrade(&block_server),
            Arc::downgrade(parent),
            index as usize,
        );
        self.partitions.insert(index, block_server);
        Ok(())
    }

    async fn bind_all_partitions(&mut self, parent: &Arc<GptManager>) -> Result<(), Error> {
        self.partitions.clear();
        self.partitions_dir.clear();
        for (index, info) in self.gpt.partitions().clone() {
            self.bind_partition(parent, index, info).await?;
        }
        Ok(())
    }

    fn add_partition(&mut self, info: gpt::PartitionInfo) -> Result<usize, gpt::AddPartitionError> {
        let pending = self.pending_transaction.as_mut().unwrap();
        let idx = self.gpt.add_partition(&mut pending.transaction, info)?;
        pending.added_partitions.push(idx as u32);
        Ok(idx)
    }
}

/// Runs a GPT device.
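///
/// A minimal usage sketch (error handling elided; `block_proxy` and `partitions_dir` are assumed
/// to be supplied by the surrounding component):
///
/// ```ignore
/// let manager = GptManager::new(block_proxy, partitions_dir).await?;
/// // ... serve the partitions directory to clients ...
/// manager.shutdown().await;
/// ```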
pub struct GptManager {
    block_proxy: fblock::BlockProxy,
    block_size: u32,
    block_count: u64,
    inner: Mutex<Inner>,
    shutdown: AtomicBool,
}

impl std::fmt::Debug for GptManager {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
        f.debug_struct("GptManager")
            .field("block_size", &self.block_size)
            .field("block_count", &self.block_count)
            .finish()
    }
}

impl GptManager {
    pub async fn new(
        block_proxy: fblock::BlockProxy,
        partitions_dir: Arc<vfs::directory::immutable::Simple>,
    ) -> Result<Arc<Self>, Error> {
        log::info!("Binding to GPT");
        let client = Arc::new(RemoteBlockClient::new(block_proxy.clone()).await?);
        let block_size = client.block_size();
        let block_count = client.block_count();
        let gpt = gpt::Gpt::open(client).await.context("Failed to load GPT")?;

        let this = Arc::new(Self {
            block_proxy,
            block_size,
            block_count,
            inner: Mutex::new(Inner {
                gpt,
                partitions: BTreeMap::new(),
                partitions_dir: PartitionsDirectory::new(partitions_dir),
                pending_transaction: None,
            }),
            shutdown: AtomicBool::new(false),
        });
        log::info!("Bind to GPT OK, binding partitions");
        this.inner.lock().await.bind_all_partitions(&this).await?;
        log::info!("Starting all partitions OK!");
        Ok(this)
    }

    pub fn block_size(&self) -> u32 {
        self.block_size
    }

    pub fn block_count(&self) -> u64 {
        self.block_count
    }

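    /// Creates a new transaction for modifying the partition table.  Only one transaction may be
    /// pending at a time; the returned event pair serves as the client's transaction token, and
    /// closing it aborts the transaction.  A sketch of the intended flow (mirroring the
    /// `add_partition` test below):
    ///
    /// ```ignore
    /// let token = manager.create_transaction().await?;
    /// manager.add_partition(request).await?;  // `request` carries a duplicate of `token`.
    /// manager.commit_transaction(token).await?;
    /// ```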
    pub async fn create_transaction(self: &Arc<Self>) -> Result<zx::EventPair, zx::Status> {
        let mut inner = self.inner.lock().await;
        if inner.pending_transaction.is_some() {
            return Err(zx::Status::ALREADY_EXISTS);
        }
        let transaction = inner.gpt.create_transaction().unwrap();
        let (client_end, server_end) = zx::EventPair::create();
        let client_koid = client_end.get_koid()?;
        let signal_waiter = fasync::OnSignals::new(server_end, zx::Signals::EVENTPAIR_PEER_CLOSED);
        let this = self.clone();
        let task = fasync::Task::spawn(async move {
            let _ = signal_waiter.await;
            let mut inner = this.inner.lock().await;
            if inner.pending_transaction.as_ref().map_or(false, |t| t.client_koid == client_koid) {
                inner.pending_transaction = None;
            }
        });
        inner.pending_transaction = Some(PendingTransaction {
            transaction,
            client_koid,
            added_partitions: vec![],
            _signal_task: task,
        });
        Ok(client_end)
    }

    pub async fn commit_transaction(
        self: &Arc<Self>,
        transaction: zx::EventPair,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(&transaction)?;
        let pending = std::mem::take(&mut inner.pending_transaction).unwrap();
        if let Err(err) = inner.gpt.commit_transaction(pending.transaction).await {
            log::error!(err:?; "Failed to commit transaction");
            return Err(zx::Status::IO);
        }
        for idx in pending.added_partitions {
            let info = inner.gpt.partitions().get(&idx).ok_or(zx::Status::BAD_STATE)?.clone();
            inner.bind_partition(self, idx, info).await.map_err(|err| {
                log::error!(err:?; "Failed to bind partition");
                zx::Status::BAD_STATE
            })?;
        }
        Ok(())
    }

    pub async fn add_partition(
        &self,
        request: fpartitions::PartitionsManagerAddPartitionRequest,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(
            request.transaction.as_ref().ok_or(zx::Status::BAD_HANDLE)?,
        )?;
        let info = gpt::PartitionInfo {
            label: request.name.ok_or(zx::Status::INVALID_ARGS)?,
            type_guid: request
                .type_guid
                .map(|value| gpt::Guid::from_bytes(value.value))
                .ok_or(zx::Status::INVALID_ARGS)?,
            instance_guid: request
                .instance_guid
                .map(|value| gpt::Guid::from_bytes(value.value))
                .unwrap_or_else(|| gpt::Guid::generate()),
            start_block: 0,
            num_blocks: request.num_blocks.ok_or(zx::Status::INVALID_ARGS)?,
            flags: request.flags.unwrap_or_default(),
        };
        let idx = inner.add_partition(info)?;
        let partition =
            inner.pending_transaction.as_ref().unwrap().transaction.partitions.get(idx).unwrap();
        log::info!(
            "Allocated partition {:?} at {:?}",
            partition.label,
            partition.start_block..partition.start_block + partition.num_blocks
        );
        Ok(())
    }

    pub async fn handle_partitions_requests(
        &self,
        gpt_index: usize,
        mut requests: fpartitions::PartitionRequestStream,
    ) -> Result<(), zx::Status> {
        while let Some(request) =
            requests.try_next().await.map_err(|_| zx::Status::PEER_CLOSED)?
        {
            match request {
                fpartitions::PartitionRequest::UpdateMetadata { payload, responder } => {
                    responder
                        .send(
                            self.update_partition_metadata(gpt_index, payload)
                                .await
                                .map_err(|status| status.into_raw()),
                        )
                        .unwrap_or_else(
                            |err| log::error!(err:?; "Failed to send UpdateMetadata response"),
                        );
                }
            }
        }
        Ok(())
    }

    async fn update_partition_metadata(
        &self,
        gpt_index: usize,
        request: fpartitions::PartitionUpdateMetadataRequest,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(
            request.transaction.as_ref().ok_or(zx::Status::BAD_HANDLE)?,
        )?;

        let transaction = &mut inner.pending_transaction.as_mut().unwrap().transaction;
        let entry = transaction.partitions.get_mut(gpt_index).ok_or(zx::Status::BAD_STATE)?;
        if let Some(type_guid) = request.type_guid.as_ref().cloned() {
            entry.type_guid = gpt::Guid::from_bytes(type_guid.value);
        }
        if let Some(flags) = request.flags.as_ref() {
            entry.flags = *flags;
        }
        Ok(())
    }

    pub async fn reset_partition_table(
        self: &Arc<Self>,
        partitions: Vec<gpt::PartitionInfo>,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        if inner.pending_transaction.is_some() {
            return Err(zx::Status::BAD_STATE);
        }

        log::info!("Resetting gpt.  Expect data loss!!!");
        let mut transaction = inner.gpt.create_transaction().unwrap();
        transaction.partitions = partitions;
        inner.gpt.commit_transaction(transaction).await?;

        log::info!("Rebinding partitions...");
        if let Err(err) = inner.bind_all_partitions(self).await {
            log::error!(err:?; "Failed to rebind partitions");
            return Err(zx::Status::BAD_STATE);
        }
        log::info!("Rebinding partitions OK!");
        Ok(())
    }

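    /// Tears down the published partitions.  Callers must invoke this before dropping the
    /// `GptManager`; the `Drop` impl below asserts that shutdown happened.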
    pub async fn shutdown(self: Arc<Self>) {
        log::info!("Shutting down gpt");
        let mut inner = self.inner.lock().await;
        inner.partitions_dir.clear();
        inner.partitions.clear();
        self.shutdown.store(true, Ordering::Relaxed);
        log::info!("Shutting down gpt OK");
    }
}

impl Drop for GptManager {
    fn drop(&mut self) {
        assert!(self.shutdown.load(Ordering::Relaxed), "Did you forget to shutdown?");
    }
}

#[cfg(test)]
mod tests {
    use super::GptManager;
    use block_client::{BlockClient as _, BufferSlice, MutableBufferSlice, RemoteBlockClient};
    use block_server::WriteOptions;
    use fake_block_server::{FakeServer, FakeServerOptions};
    use fidl::HandleBased as _;
    use fuchsia_component::client::connect_to_named_protocol_at_dir_root;
    use gpt::{Gpt, Guid, PartitionInfo};
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;
    use {
        fidl_fuchsia_hardware_block as fblock, fidl_fuchsia_hardware_block_volume as fvolume,
        fidl_fuchsia_io as fio, fidl_fuchsia_storage_partitions as fpartitions,
        fuchsia_async as fasync,
    };

    async fn setup(
        block_size: u32,
        block_count: u64,
        partitions: Vec<PartitionInfo>,
    ) -> (Arc<FakeServer>, Arc<vfs::directory::immutable::Simple>) {
        setup_with_options(
            FakeServerOptions { block_count: Some(block_count), block_size, ..Default::default() },
            partitions,
        )
        .await
    }

    async fn setup_with_options(
        opts: FakeServerOptions<'_>,
        partitions: Vec<PartitionInfo>,
    ) -> (Arc<FakeServer>, Arc<vfs::directory::immutable::Simple>) {
        let server = Arc::new(FakeServer::from(opts));
        {
            let (block_client, block_server) =
                fidl::endpoints::create_proxy::<fblock::BlockMarker>();
            let volume_stream = fidl::endpoints::ServerEnd::<fvolume::VolumeMarker>::from(
                block_server.into_channel(),
            )
            .into_stream();
            let server_clone = server.clone();
            let _task = fasync::Task::spawn(async move { server_clone.serve(volume_stream).await });
            let client = Arc::new(RemoteBlockClient::new(block_client).await.unwrap());
            Gpt::format(client, partitions).await.unwrap();
        }
        (server, vfs::directory::immutable::simple())
    }

    #[fuchsia::test]
    async fn load_unformatted_gpt() {
        let vmo = zx::Vmo::create(4096).unwrap();
        let server = Arc::new(FakeServer::from_vmo(512, vmo));

        GptManager::new(server.block_proxy(), vfs::directory::immutable::simple())
            .await
            .expect_err("load should fail");
    }

    #[fuchsia::test]
    async fn load_formatted_empty_gpt() {
        let (block_device, partitions_dir) = setup(512, 8, vec![]).await;

        let runner = GptManager::new(block_device.block_proxy(), partitions_dir)
            .await
            .expect("load should succeed");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_one_partition() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir_clone)
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").map(|_| ()).expect_err("Extra entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_two_partitions() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir_clone)
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        partitions_dir.get_entry("part-002").map(|_| ()).expect_err("Extra entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn partition_io() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 2,
                flags: 0,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        assert_eq!(client.block_count(), 2);
        assert_eq!(client.block_size(), 512);

        let buf = vec![0xabu8; 512];
        client.write_at(BufferSlice::Memory(&buf[..]), 0).await.expect("write_at failed");
        client
            .write_at(BufferSlice::Memory(&buf[..]), 1024)
            .await
            .expect_err("write_at should fail when writing past partition end");
        let mut buf2 = vec![0u8; 512];
        client.read_at(MutableBufferSlice::Memory(&mut buf2[..]), 0).await.expect("read_at failed");
        assert_eq!(buf, buf2);
        client
            .read_at(MutableBufferSlice::Memory(&mut buf2[..]), 1024)
            .await
            .expect_err("read_at should fail when reading past partition end");
        client.trim(512..1024).await.expect("trim failed");
        client.trim(1..512).await.expect_err("trim with invalid range should fail");
        client.trim(1024..1536).await.expect_err("trim past end of partition should fail");
        runner.shutdown().await;

        // Ensure writes persisted to the partition.
        let mut buf = vec![0u8; 512];
        let client = RemoteBlockClient::new(block_device.block_proxy()).await.unwrap();
        client.read_at(MutableBufferSlice::Memory(&mut buf[..]), 2048).await.unwrap();
        assert_eq!(&buf[..], &[0xabu8; 512]);
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_invalid_primary_header() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        {
            let (client, stream) =
                fidl::endpoints::create_proxy_and_stream::<fvolume::VolumeMarker>();
            let server = block_device.clone();
            let _task = fasync::Task::spawn(async move { server.serve(stream).await });
            let client = RemoteBlockClient::new(client).await.unwrap();
            client.write_at(BufferSlice::Memory(&[0xffu8; 512]), 512).await.unwrap();
        }

        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_invalid_primary_partition_table() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        {
            let (client, stream) =
                fidl::endpoints::create_proxy_and_stream::<fvolume::VolumeMarker>();
            let server = block_device.clone();
            let _task = fasync::Task::spawn(async move { server.serve(stream).await });
            let client = RemoteBlockClient::new(client).await.unwrap();
            client.write_at(BufferSlice::Memory(&[0xffu8; 512]), 1024).await.unwrap();
        }

        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn force_access_passed_through() {
        const BLOCK_SIZE: u32 = 512;
        const BLOCK_COUNT: u64 = 1024;

        struct Observer(Arc<AtomicBool>);

        impl fake_block_server::Observer for Observer {
            fn write(
                &self,
                _device_block_offset: u64,
                _block_count: u32,
                _vmo: &Arc<zx::Vmo>,
                _vmo_offset: u64,
                opts: WriteOptions,
            ) -> fake_block_server::WriteAction {
                assert_eq!(
                    opts.contains(WriteOptions::FORCE_ACCESS),
                    self.0.load(Ordering::Relaxed)
                );
                fake_block_server::WriteAction::Write
            }
        }

        let expect_force_access = Arc::new(AtomicBool::new(false));
        let (server, partitions_dir) = setup_with_options(
            FakeServerOptions {
                block_count: Some(BLOCK_COUNT),
                block_size: BLOCK_SIZE,
                observer: Some(Box::new(Observer(expect_force_access.clone()))),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: "foo".to_string(),
                type_guid: Guid::from_bytes([1; 16]),
                instance_guid: Guid::from_bytes([2; 16]),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let manager = GptManager::new(server.block_proxy(), partitions_dir.clone()).await.unwrap();

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        let buffer = vec![0; BLOCK_SIZE as usize];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();

        expect_force_access.store(true, Ordering::Relaxed);

        client
            .write_at_with_opts(BufferSlice::Memory(&buffer), 0, WriteOptions::FORCE_ACCESS)
            .await
            .unwrap();

        manager.shutdown().await;
    }

    #[fuchsia::test]
    async fn commit_transaction() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_NAME: &str = "part";
        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            16,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");

        let part_0_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_1_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-001").unwrap(),
            fio::PERM_READABLE,
        );
        let part_0_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_0_dir,
            "partition",
        )
        .expect("Failed to open Partition service");
        let part_1_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_1_dir,
            "partition",
        )
        .expect("Failed to open Partition service");

        let transaction = runner.create_transaction().await.expect("Failed to create transaction");
        part_0_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                type_guid: Some(fidl_fuchsia_hardware_block_partition::Guid {
                    value: [0xffu8; 16],
                }),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");
        part_1_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                flags: Some(1234),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");
        runner.commit_transaction(transaction).await.expect("Failed to commit transaction");

        // Ensure the changes have propagated to the correct partitions.
        let part_0_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_0_dir, "volume")
                .expect("Failed to open Volume service");
        let (status, guid) = part_0_block.get_type_guid().await.expect("FIDL error");
        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
        assert_eq!(guid.unwrap().value, [0xffu8; 16]);
        let part_1_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_1_dir, "volume")
                .expect("Failed to open Volume service");
        let metadata =
            part_1_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
        assert_eq!(metadata.flags, Some(1234));

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables() {
        // The test will reset the tables from ["part", "part2"] to
        // ["part3", <empty>, "part4", <125 empty entries>].
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_NAME: &str = "part";
        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
        const PART_2_NAME: &str = "part2";
        const PART_3_NAME: &str = "part3";
        const PART_4_NAME: &str = "part4";

        let (block_device, partitions_dir) = setup(
            512,
            1048576 / 512,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let nil_entry = PartitionInfo {
            label: "".to_string(),
            type_guid: Guid::from_bytes([0u8; 16]),
            instance_guid: Guid::from_bytes([0u8; 16]),
            start_block: 0,
            num_blocks: 0,
            flags: 0,
        };
        let mut new_partitions = vec![nil_entry; 128];
        new_partitions[0] = PartitionInfo {
            label: PART_3_NAME.to_string(),
            type_guid: Guid::from_bytes(PART_TYPE_GUID),
            instance_guid: Guid::from_bytes([1u8; 16]),
            start_block: 64,
            num_blocks: 2,
            flags: 0,
        };
        new_partitions[2] = PartitionInfo {
            label: PART_4_NAME.to_string(),
            type_guid: Guid::from_bytes(PART_TYPE_GUID),
            instance_guid: Guid::from_bytes([2u8; 16]),
            start_block: 66,
            num_blocks: 4,
            flags: 0,
        };
        runner.reset_partition_table(new_partitions).await.expect("reset_partition_table failed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").map(|_| ()).expect_err("Extra entry found");
        partitions_dir.get_entry("part-002").expect("No entry found");

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let (status, name) = block.get_name().await.expect("FIDL error");
        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
        assert_eq!(name.unwrap(), PART_3_NAME);

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_too_many_partitions() {
        let (block_device, partitions_dir) = setup(512, 8, vec![]).await;
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let nil_entry = PartitionInfo {
            label: "".to_string(),
            type_guid: Guid::from_bytes([0u8; 16]),
            instance_guid: Guid::from_bytes([0u8; 16]),
            start_block: 0,
            num_blocks: 0,
            flags: 0,
        };
        let new_partitions = vec![nil_entry; 128];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_too_large_partitions() {
        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let new_partitions = vec![
            PartitionInfo {
                label: "a".to_string(),
                type_guid: Guid::from_bytes([1u8; 16]),
                instance_guid: Guid::from_bytes([1u8; 16]),
                start_block: 4,
                num_blocks: 2,
                flags: 0,
            },
            PartitionInfo {
                label: "b".to_string(),
                type_guid: Guid::from_bytes([2u8; 16]),
                instance_guid: Guid::from_bytes([2u8; 16]),
                start_block: 6,
                num_blocks: 200,
                flags: 0,
            },
        ];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_partition_overlaps_metadata() {
        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let new_partitions = vec![PartitionInfo {
            label: "a".to_string(),
            type_guid: Guid::from_bytes([1u8; 16]),
            instance_guid: Guid::from_bytes([1u8; 16]),
            start_block: 1,
            num_blocks: 2,
            flags: 0,
        }];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_partitions_overlap() {
        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let new_partitions = vec![
            PartitionInfo {
                label: "a".to_string(),
                type_guid: Guid::from_bytes([1u8; 16]),
                instance_guid: Guid::from_bytes([1u8; 16]),
                start_block: 32,
                num_blocks: 2,
                flags: 0,
            },
            PartitionInfo {
                label: "b".to_string(),
                type_guid: Guid::from_bytes([2u8; 16]),
                instance_guid: Guid::from_bytes([2u8; 16]),
                start_block: 33,
                num_blocks: 1,
                flags: 0,
            },
        ];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn add_partition() {
        let (block_device, partitions_dir) = setup(512, 64, vec![PartitionInfo::nil(); 64]).await;
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");

        let transaction = runner.create_transaction().await.expect("Create transaction failed");
        let request = fpartitions::PartitionsManagerAddPartitionRequest {
            transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
            name: Some("a".to_string()),
            type_guid: Some(fidl_fuchsia_hardware_block_partition::Guid { value: [1u8; 16] }),
            num_blocks: Some(2),
            ..Default::default()
        };
        runner.add_partition(request).await.expect("add_partition failed");
        runner.commit_transaction(transaction).await.expect("commit_transaction failed");

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        assert_eq!(client.block_count(), 2);
        assert_eq!(client.block_size(), 512);

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn partition_info() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup_with_options(
            FakeServerOptions {
                block_count: Some(8),
                block_size: 512,
                flags: fblock::Flag::READONLY | fblock::Flag::REMOVABLE,
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 1,
                flags: 0xabcd,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_dir, "volume")
                .expect("Failed to open Volume service");
        let info = part_block.get_info().await.expect("FIDL error").expect("get_info failed");
        assert_eq!(info.block_count, 1);
        assert_eq!(info.block_size, 512);
        assert_eq!(info.flags, fblock::Flag::READONLY | fblock::Flag::REMOVABLE);

        let metadata =
            part_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
        assert_eq!(metadata.name, Some(PART_NAME.to_string()));
        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
        assert_eq!(metadata.instance_guid.unwrap().value, PART_INSTANCE_GUID);
        assert_eq!(metadata.start_block_offset, Some(4));
        assert_eq!(metadata.num_blocks, Some(1));
        assert_eq!(metadata.flags, Some(0xabcd));

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn nested_gpt() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let vmo = zx::Vmo::create(64 * 512).unwrap();
        let vmo_clone = vmo.create_child(zx::VmoChildOptions::REFERENCE, 0, 0).unwrap();
        let (outer_block_device, outer_partitions_dir) = setup_with_options(
            FakeServerOptions {
                vmo: Some(vmo_clone),
                block_size: 512,
                flags: fblock::Flag::READONLY | fblock::Flag::REMOVABLE,
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 16,
                flags: 0xabcd,
            }],
        )
        .await;

        let outer_partitions_dir_clone = outer_partitions_dir.clone();
        let outer_runner =
            GptManager::new(outer_block_device.block_proxy(), outer_partitions_dir_clone)
                .await
                .expect("load should succeed");

        let outer_part_dir = vfs::serve_directory(
            outer_partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&outer_part_dir, "volume")
                .expect("Failed to open Block service");

        let client = Arc::new(RemoteBlockClient::new(part_block.clone()).await.unwrap());
        let _ = gpt::Gpt::format(
            client,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 5,
                num_blocks: 1,
                flags: 0xabcd,
            }],
        )
        .await
        .unwrap();

        let partitions_dir = vfs::directory::immutable::simple();
        let partitions_dir_clone = partitions_dir.clone();
        let runner =
            GptManager::new(part_block, partitions_dir_clone).await.expect("load should succeed");
        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let inner_part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Block service");

        let client =
            RemoteBlockClient::new(inner_part_block).await.expect("Failed to create block client");
        assert_eq!(client.block_count(), 1);
        assert_eq!(client.block_size(), 512);

        let buffer = vec![0xaa; 512];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();
        client
            .write_at(BufferSlice::Memory(&buffer), 512)
            .await
            .expect_err("Write past end should fail");
        client.flush().await.unwrap();

        runner.shutdown().await;
        outer_runner.shutdown().await;

        // Check that the write targeted the correct block (4 + 5 = 9).
        let data = vmo.read_to_vec(9 * 512, 512).unwrap();
        assert_eq!(&data[..], &buffer[..]);
    }

    #[fuchsia::test]
    async fn offset_map_does_not_allow_partition_overwrite() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup_with_options(
            FakeServerOptions {
                block_count: Some(16),
                block_size: 512,
                flags: fblock::Flag::READONLY | fblock::Flag::REMOVABLE,
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 2,
                flags: 0xabcd,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );

        // Open a session that shifts all block offsets by one.  The apparent range of the
        // partition should be [0..512) bytes (which corresponds to [512..1024) in the partition),
        // because bytes [512..1024) would be mapped to [1024..1536), which exceeds the partition's
        // limit.
        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Block service");
        let info = part_block.get_info().await.expect("FIDL error").expect("get_info failed");
        let (session, server_end) = fidl::endpoints::create_proxy::<fblock::SessionMarker>();
        part_block
            .open_session_with_offset_map(
                server_end,
                None,
                Some(&[fblock::BlockOffsetMapping {
                    source_block_offset: 0,
                    target_block_offset: 1,
                    length: 2,
                }]),
            )
            .expect("FIDL error");

        let client = Arc::new(RemoteBlockClient::from_session(info, session).await.unwrap());
        let mut buffer = vec![0xaa; 512];
        client.flush().await.expect("Flush should succeed");
        client
            .read_at(MutableBufferSlice::Memory(&mut buffer), 0)
            .await
            .expect("Read should succeed");
        client.write_at(BufferSlice::Memory(&buffer), 0).await.expect("Write should succeed");
        client
            .read_at(MutableBufferSlice::Memory(&mut buffer), 512)
            .await
            .expect_err("Read past end should fail");
        client
            .write_at(BufferSlice::Memory(&buffer), 512)
            .await
            .expect_err("Write past end should fail");

        runner.shutdown().await;
    }
}