// gpt_component/partition.rs

1// Copyright 2024 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4use crate::gpt::GptPartition;
5use anyhow::Error;
6use block_client::{ReadOptions, VmoId, WriteOptions};
7use block_server::OffsetMap;
8use block_server::async_interface::{PassthroughSession, SessionManager};
9use fidl_fuchsia_hardware_block as fblock;
10
11use fuchsia_sync::Mutex;
12use std::borrow::Cow;
13use std::collections::BTreeMap;
14use std::num::NonZero;
15use std::sync::Arc;
16
/// PartitionBackend is an implementation of block_server's Interface which is backed by a windowed
/// view of the underlying GPT device.
pub struct PartitionBackend {
    // The underlying GPT partition this backend forwards I/O to.
    partition: Arc<GptPartition>,
    // Maps the address of an attached VMO (see `on_attach_vmo`) to the VmoId registered with the
    // underlying partition.  Keyed by pointer address, which can be reused -- see the kludge note
    // in `on_attach_vmo` and TODO(https://fxbug.dev/339491886).
    vmo_keys_to_vmoids_map: Mutex<BTreeMap<usize, Arc<VmoId>>>,
}
23
24impl block_server::async_interface::Interface for PartitionBackend {
25    async fn open_session(
26        &self,
27        session_manager: Arc<SessionManager<Self>>,
28        stream: fblock::SessionRequestStream,
29        offset_map: OffsetMap,
30        block_size: u32,
31    ) -> Result<(), Error> {
32        if !offset_map.is_empty() {
33            // For now, we don't support double-passthrough.  We could as needed for nested GPT.
34            // If we support this, we can remove I/O and vmoid management from this struct.
35            return session_manager.serve_session(stream, offset_map, block_size).await;
36        }
37        let (proxy, server_end) = fidl::endpoints::create_proxy::<fblock::SessionMarker>();
38        self.partition.open_passthrough_session(server_end);
39        let passthrough = PassthroughSession::new(proxy);
40        passthrough.serve(stream).await
41    }
42
43    async fn on_attach_vmo(&self, vmo: &zx::Vmo) -> Result<(), zx::Status> {
44        let key = std::ptr::from_ref(vmo) as usize;
45        let vmoid = self.partition.attach_vmo(vmo).await?;
46        let old = self.vmo_keys_to_vmoids_map.lock().insert(key, Arc::new(vmoid));
47        if let Some(vmoid) = old {
48            // For now, leak the old vmoid.
49            // XXX kludge -- addresses can be reused!  We need to manage vmoids ourself to properly
50            // manage lifetimes, or possibly change the APIs to eliminate the need to do so.
51            // TODO(https://fxbug.dev/339491886): Reconcile vmoid management.
52            let _ = Arc::try_unwrap(vmoid)
53                .map(|vmoid| vmoid.into_id())
54                .expect("VMO removed while in use");
55        }
56        Ok(())
57    }
58
59    async fn get_info(&self) -> Result<Cow<'_, block_server::DeviceInfo>, zx::Status> {
60        Ok(Cow::Owned(self.partition.get_info()))
61    }
62
63    fn barrier(&self) -> Result<(), zx::Status> {
64        self.partition.barrier();
65        Ok(())
66    }
67
68    async fn read(
69        &self,
70        device_block_offset: u64,
71        block_count: u32,
72        vmo: &Arc<zx::Vmo>,
73        vmo_offset: u64, // *bytes* not blocks
74        trace_flow_id: Option<NonZero<u64>>,
75    ) -> Result<(), zx::Status> {
76        let vmoid = self.get_vmoid(vmo)?;
77        self.partition
78            .read(
79                device_block_offset,
80                block_count,
81                vmoid.as_ref(),
82                vmo_offset,
83                // TODO(https://fxbug.dev/441395652): Plumb InlineCryptoOptions through the
84                // block_server
85                ReadOptions::default(),
86                trace_flow_id,
87            )
88            .await
89    }
90
91    async fn write(
92        &self,
93        device_block_offset: u64,
94        length: u32,
95        vmo: &Arc<zx::Vmo>,
96        vmo_offset: u64, // *bytes* not blocks
97        write_opts: WriteOptions,
98        trace_flow_id: Option<NonZero<u64>>,
99    ) -> Result<(), zx::Status> {
100        let vmoid = self.get_vmoid(vmo)?;
101        self.partition
102            .write(
103                device_block_offset,
104                length,
105                vmoid.as_ref(),
106                vmo_offset,
107                write_opts,
108                trace_flow_id,
109            )
110            .await
111    }
112
113    async fn flush(&self, trace_flow_id: Option<NonZero<u64>>) -> Result<(), zx::Status> {
114        self.partition.flush(trace_flow_id).await
115    }
116
117    async fn trim(
118        &self,
119        device_block_offset: u64,
120        block_count: u32,
121        trace_flow_id: Option<NonZero<u64>>,
122    ) -> Result<(), zx::Status> {
123        self.partition.trim(device_block_offset, block_count, trace_flow_id).await
124    }
125}
126
127impl PartitionBackend {
128    pub fn new(partition: Arc<GptPartition>) -> Arc<Self> {
129        Arc::new(Self { partition, vmo_keys_to_vmoids_map: Mutex::new(BTreeMap::new()) })
130    }
131
132    /// Returns the old info.
133    pub fn update_info(&self, info: gpt::PartitionInfo) -> gpt::PartitionInfo {
134        self.partition.update_info(info)
135    }
136
137    fn get_vmoid(&self, vmo: &zx::Vmo) -> Result<Arc<VmoId>, zx::Status> {
138        let key = std::ptr::from_ref(vmo) as usize;
139        self.vmo_keys_to_vmoids_map.lock().get(&key).map(Arc::clone).ok_or(zx::Status::NOT_FOUND)
140    }
141}
142
143impl Drop for PartitionBackend {
144    fn drop(&mut self) {
145        for vmoid in std::mem::take(&mut *self.vmo_keys_to_vmoids_map.lock()).into_values() {
146            // For now, leak the vmoids.
147            // TODO(https://fxbug.dev/339491886): Reconcile vmoid management.
148            let _ = Arc::try_unwrap(vmoid)
149                .map(|vmoid| vmoid.into_id())
150                .expect("VMO removed while in use");
151        }
152    }
153}