// gpt_component/partition.rs
use crate::gpt::GptPartition;
5use anyhow::Error;
6use block_client::{ReadOptions, VmoId, WriteOptions};
7use block_server::async_interface::{PassthroughSession, SessionManager};
8use block_server::{DeviceInfo, OffsetMap};
9use fidl_fuchsia_hardware_block as fblock;
10
11use fuchsia_sync::Mutex;
12use std::borrow::Cow;
13use std::collections::BTreeMap;
14use std::num::NonZero;
15use std::sync::Arc;
16
/// Serves the block protocol for a single GPT partition by delegating requests to a windowed view
/// of the underlying device (`GptPartition`).
pub struct PartitionBackend {
    // The partition this backend reads from and writes to.
    partition: Arc<GptPartition>,
    // Maps each attached VMO — keyed by the address of the `zx::Vmo` handle passed to
    // `on_attach_vmo` — to the `VmoId` registered with the underlying partition.
    // NOTE(review): assumes the block server keeps attached `zx::Vmo` objects at stable addresses
    // for the lifetime of the attachment — confirm against the server's VMO bookkeeping.
    vmo_keys_to_vmoids_map: Mutex<BTreeMap<usize, Arc<VmoId>>>,
}
23
24impl block_server::async_interface::Interface for PartitionBackend {
25 async fn open_session(
26 &self,
27 session_manager: Arc<SessionManager<Self>>,
28 stream: fblock::SessionRequestStream,
29 offset_map: OffsetMap,
30 block_size: u32,
31 ) -> Result<(), Error> {
32 if !offset_map.is_empty() {
33 return session_manager.serve_session(stream, offset_map, block_size).await;
36 }
37 let (proxy, server_end) = fidl::endpoints::create_proxy::<fblock::SessionMarker>();
38 self.partition.open_passthrough_session(server_end);
39 let passthrough = PassthroughSession::new(proxy);
40 passthrough.serve(stream).await
41 }
42
43 async fn on_attach_vmo(&self, vmo: &zx::Vmo) -> Result<(), zx::Status> {
44 let key = std::ptr::from_ref(vmo) as usize;
45 let vmoid = self.partition.attach_vmo(vmo).await?;
46 let old = self.vmo_keys_to_vmoids_map.lock().insert(key, Arc::new(vmoid));
47 if let Some(vmoid) = old {
48 let _ = Arc::try_unwrap(vmoid)
53 .map(|vmoid| vmoid.into_id())
54 .expect("VMO removed while in use");
55 }
56 Ok(())
57 }
58
59 fn get_info(&self) -> Cow<'_, DeviceInfo> {
60 Cow::Owned(self.partition.get_info())
61 }
62
63 async fn read(
64 &self,
65 device_block_offset: u64,
66 block_count: u32,
67 vmo: &Arc<zx::Vmo>,
68 vmo_offset: u64, opts: ReadOptions,
70 trace_flow_id: Option<NonZero<u64>>,
71 ) -> Result<(), zx::Status> {
72 let vmoid = self.get_vmoid(vmo)?;
73 self.partition
74 .read(device_block_offset, block_count, vmoid.as_ref(), vmo_offset, opts, trace_flow_id)
75 .await
76 }
77
78 async fn write(
79 &self,
80 device_block_offset: u64,
81 length: u32,
82 vmo: &Arc<zx::Vmo>,
83 vmo_offset: u64, opts: WriteOptions,
85 trace_flow_id: Option<NonZero<u64>>,
86 ) -> Result<(), zx::Status> {
87 let vmoid = self.get_vmoid(vmo)?;
88 self.partition
89 .write(device_block_offset, length, vmoid.as_ref(), vmo_offset, opts, trace_flow_id)
90 .await
91 }
92
93 async fn flush(&self, trace_flow_id: Option<NonZero<u64>>) -> Result<(), zx::Status> {
94 self.partition.flush(trace_flow_id).await
95 }
96
97 async fn trim(
98 &self,
99 device_block_offset: u64,
100 block_count: u32,
101 trace_flow_id: Option<NonZero<u64>>,
102 ) -> Result<(), zx::Status> {
103 self.partition.trim(device_block_offset, block_count, trace_flow_id).await
104 }
105}
106
107impl PartitionBackend {
108 pub fn new(partition: Arc<GptPartition>) -> Arc<Self> {
109 Arc::new(Self { partition, vmo_keys_to_vmoids_map: Mutex::new(BTreeMap::new()) })
110 }
111
112 pub fn update_info(&self, info: gpt::PartitionInfo) -> gpt::PartitionInfo {
114 self.partition.update_info(info)
115 }
116
117 fn get_vmoid(&self, vmo: &zx::Vmo) -> Result<Arc<VmoId>, zx::Status> {
118 let key = std::ptr::from_ref(vmo) as usize;
119 self.vmo_keys_to_vmoids_map.lock().get(&key).map(Arc::clone).ok_or(zx::Status::NOT_FOUND)
120 }
121}
122
123impl Drop for PartitionBackend {
124 fn drop(&mut self) {
125 for vmoid in std::mem::take(&mut *self.vmo_keys_to_vmoids_map.lock()).into_values() {
126 let _ = Arc::try_unwrap(vmoid)
129 .map(|vmoid| vmoid.into_id())
130 .expect("VMO removed while in use");
131 }
132 }
133}