Skip to main content

gpt_component/
partition.rs

// Copyright 2024 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4use crate::gpt::GptPartition;
5use anyhow::Error;
6use block_client::{ReadOptions, VmoId, WriteOptions};
7use block_server::async_interface::{PassthroughSession, SessionManager};
8use block_server::{DeviceInfo, OffsetMap};
9use fidl_fuchsia_storage_block as fblock;
10
11use fuchsia_sync::Mutex;
12use std::borrow::Cow;
13use std::collections::BTreeMap;
14use std::num::NonZero;
15use std::sync::Arc;
16
/// PartitionBackend is an implementation of block_server's Interface which is backed by a windowed
/// view of the underlying GPT device.
pub struct PartitionBackend {
    // The GPT partition this backend serves; all I/O and session handling delegates to it.
    partition: Arc<GptPartition>,
    // Maps a VMO's address (used as an identity key) to the vmoid registered with the
    // underlying device in `on_attach_vmo`.
    // NOTE(review): addresses can be reused once a VMO is dropped, so a stale entry can be
    // overwritten by a new VMO at the same address -- see TODO(https://fxbug.dev/339491886).
    vmo_keys_to_vmoids_map: Mutex<BTreeMap<usize, Arc<VmoId>>>,
    // When true (and no offset remapping is needed), sessions are proxied straight through to
    // the underlying block device instead of being served by this struct (see `open_session`).
    passthrough: bool,
}
24
25impl block_server::async_interface::Interface for PartitionBackend {
26    async fn open_session(
27        &self,
28        session_manager: Arc<SessionManager<Self>>,
29        stream: fblock::SessionRequestStream,
30        offset_map: OffsetMap,
31        block_size: u32,
32    ) -> Result<(), Error> {
33        if !self.passthrough || !offset_map.is_empty() {
34            // For now, we don't support double-passthrough.  We could as needed for nested GPT.
35            // If we support this, we can remove I/O and vmoid management from this struct.
36            return session_manager.serve_session(stream, offset_map, block_size).await;
37        }
38        let (proxy, server_end) = fidl::endpoints::create_proxy::<fblock::SessionMarker>();
39        self.partition.open_passthrough_session(server_end);
40        let passthrough = PassthroughSession::new(proxy);
41        passthrough.serve(stream).await
42    }
43
44    async fn on_attach_vmo(&self, vmo: &zx::Vmo) -> Result<(), zx::Status> {
45        let key = std::ptr::from_ref(vmo) as usize;
46        let vmoid = self.partition.attach_vmo(vmo).await?;
47        let old = self.vmo_keys_to_vmoids_map.lock().insert(key, Arc::new(vmoid));
48        if let Some(vmoid) = old {
49            // For now, leak the old vmoid.
50            // XXX kludge -- addresses can be reused!  We need to manage vmoids ourself to properly
51            // manage lifetimes, or possibly change the APIs to eliminate the need to do so.
52            // TODO(https://fxbug.dev/339491886): Reconcile vmoid management.
53            let _ = Arc::try_unwrap(vmoid)
54                .map(|vmoid| vmoid.into_id())
55                .expect("VMO removed while in use");
56        }
57        Ok(())
58    }
59
60    fn get_info(&self) -> Cow<'_, DeviceInfo> {
61        Cow::Owned(self.partition.get_info())
62    }
63
64    async fn read(
65        &self,
66        device_block_offset: u64,
67        block_count: u32,
68        vmo: &Arc<zx::Vmo>,
69        vmo_offset: u64, // *bytes* not blocks
70        opts: ReadOptions,
71        trace_flow_id: Option<NonZero<u64>>,
72    ) -> Result<(), zx::Status> {
73        let vmoid = self.get_vmoid(vmo)?;
74        self.partition
75            .read(device_block_offset, block_count, vmoid.as_ref(), vmo_offset, opts, trace_flow_id)
76            .await
77    }
78
79    async fn write(
80        &self,
81        device_block_offset: u64,
82        length: u32,
83        vmo: &Arc<zx::Vmo>,
84        vmo_offset: u64, // *bytes* not blocks
85        opts: WriteOptions,
86        trace_flow_id: Option<NonZero<u64>>,
87    ) -> Result<(), zx::Status> {
88        let vmoid = self.get_vmoid(vmo)?;
89        self.partition
90            .write(device_block_offset, length, vmoid.as_ref(), vmo_offset, opts, trace_flow_id)
91            .await
92    }
93
94    async fn flush(&self, trace_flow_id: Option<NonZero<u64>>) -> Result<(), zx::Status> {
95        self.partition.flush(trace_flow_id).await
96    }
97
98    async fn trim(
99        &self,
100        device_block_offset: u64,
101        block_count: u32,
102        trace_flow_id: Option<NonZero<u64>>,
103    ) -> Result<(), zx::Status> {
104        self.partition.trim(device_block_offset, block_count, trace_flow_id).await
105    }
106}
107
108impl PartitionBackend {
109    pub fn new(partition: Arc<GptPartition>, passthrough: bool) -> Arc<Self> {
110        Arc::new(Self {
111            partition,
112            vmo_keys_to_vmoids_map: Mutex::new(BTreeMap::new()),
113            passthrough,
114        })
115    }
116
117    /// Returns the old info.
118    pub fn update_info(&self, info: gpt::PartitionInfo) -> gpt::PartitionInfo {
119        self.partition.update_info(info)
120    }
121
122    fn get_vmoid(&self, vmo: &zx::Vmo) -> Result<Arc<VmoId>, zx::Status> {
123        let key = std::ptr::from_ref(vmo) as usize;
124        self.vmo_keys_to_vmoids_map.lock().get(&key).map(Arc::clone).ok_or(zx::Status::NOT_FOUND)
125    }
126}
127
128impl Drop for PartitionBackend {
129    fn drop(&mut self) {
130        for vmoid in std::mem::take(&mut *self.vmo_keys_to_vmoids_map.lock()).into_values() {
131            // For now, leak the vmoids.
132            // TODO(https://fxbug.dev/339491886): Reconcile vmoid management.
133            let _ = Arc::try_unwrap(vmoid)
134                .map(|vmoid| vmoid.into_id())
135                .expect("VMO removed while in use");
136        }
137    }
138}