1use crate::Device;
6use crate::buffer::{BufferFuture, BufferRef, MutableBufferRef};
7use crate::buffer_allocator::{BufferAllocator, BufferSource};
8use anyhow::{Error, bail, ensure};
9use async_trait::async_trait;
10use block_client::{
11 BlockClient, BlockDeviceFlag, BufferSlice, MutableBufferSlice, ReadOptions, VmoId, WriteOptions,
12};
13use std::ops::Range;
14use zx::Status;
15
/// A [`Device`] implementation backed by a remote block device accessed via `T: BlockClient`.
///
/// All reads and writes are staged through a single transfer VMO that is registered with the
/// remote device at construction time (see [`BlockDevice::new`]).
pub struct BlockDevice<T> {
    // Allocates sub-ranges of the transfer VMO for individual I/O operations.
    allocator: BufferAllocator,
    // The connection to the underlying block device.
    remote: T,
    // When true, all write and trim operations fail with ACCESS_DENIED.
    read_only: bool,
    // Identifies the transfer VMO on the remote side; invalidated by `close()`.
    vmoid: VmoId,
}

// Size of the transfer VMO shared with the block device (128 MiB).  All in-flight I/O buffers
// are carved out of this region by the allocator.
const TRANSFER_VMO_SIZE: usize = 128 * 1024 * 1024;
25
26impl<T: BlockClient> BlockDevice<T> {
27 pub async fn new(remote: T, read_only: bool) -> Result<Self, Error> {
29 let buffer_source = BufferSource::new(TRANSFER_VMO_SIZE);
30 let vmoid = remote.attach_vmo(buffer_source.vmo()).await?;
31 let allocator = BufferAllocator::new(remote.block_size() as usize, buffer_source);
32 Ok(Self { allocator, remote, read_only, vmoid })
33 }
34}
35
36#[async_trait]
37impl<T: BlockClient> Device for BlockDevice<T> {
38 fn allocate_buffer(&self, size: usize) -> BufferFuture<'_> {
39 self.allocator.allocate_buffer(size)
40 }
41
42 fn clean_transfer_buffer(&self) {
43 self.allocator.clean_transfer_buffer();
44 }
45
46 fn block_size(&self) -> u32 {
47 self.remote.block_size()
48 }
49
50 fn block_count(&self) -> u64 {
51 self.remote.block_count()
52 }
53
54 async fn read_with_opts(
55 &self,
56 offset: u64,
57 buffer: MutableBufferRef<'_>,
58 read_opts: ReadOptions,
59 ) -> Result<(), Error> {
60 if buffer.len() == 0 {
61 return Ok(());
62 }
63 ensure!(self.vmoid.is_valid(), Status::INVALID_ARGS);
64 ensure!(offset % (self.block_size() as u64) == 0, Status::INVALID_ARGS);
65 ensure!(buffer.range().start % (self.block_size() as usize) == 0, Status::INVALID_ARGS);
66 ensure!(buffer.range().end % (self.block_size() as usize) == 0, Status::INVALID_ARGS);
67 Ok(self
68 .remote
69 .read_at_with_opts(
70 MutableBufferSlice::new_with_vmo_id(
71 &self.vmoid,
72 buffer.range().start as u64,
73 buffer.len() as u64,
74 ),
75 offset,
76 read_opts,
77 )
78 .await?)
79 }
80
81 async fn write_with_opts(
82 &self,
83 offset: u64,
84 buffer: BufferRef<'_>,
85 opts: WriteOptions,
86 ) -> Result<(), Error> {
87 if self.read_only {
88 bail!(Status::ACCESS_DENIED);
89 }
90 if buffer.len() == 0 {
91 return Ok(());
92 }
93 ensure!(self.vmoid.is_valid(), "Device is closed");
94 ensure!(offset % (self.block_size() as u64) == 0, Status::INVALID_ARGS);
95 ensure!(buffer.range().start % (self.block_size() as usize) == 0, Status::INVALID_ARGS);
96 ensure!(buffer.range().end % (self.block_size() as usize) == 0, Status::INVALID_ARGS);
97 Ok(self
98 .remote
99 .write_at_with_opts(
100 BufferSlice::new_with_vmo_id(
101 &self.vmoid,
102 buffer.range().start as u64,
103 buffer.len() as u64,
104 ),
105 offset,
106 opts,
107 )
108 .await?)
109 }
110
111 async fn trim(&self, range: Range<u64>) -> Result<(), Error> {
112 if self.read_only {
113 bail!(Status::ACCESS_DENIED);
114 }
115 ensure!(range.start % (self.block_size() as u64) == 0, Status::INVALID_ARGS);
116 ensure!(range.end % (self.block_size() as u64) == 0, Status::INVALID_ARGS);
117 Ok(self.remote.trim(range).await?)
118 }
119
120 async fn close(&self) -> Result<(), Error> {
121 let _ = self.vmoid.take().into_id();
123 Ok(self.remote.close().await?)
124 }
125
126 async fn flush(&self) -> Result<(), Error> {
127 Ok(self.remote.flush().await?)
128 }
129
130 fn barrier(&self) {
131 self.remote.barrier()
132 }
133
134 fn is_read_only(&self) -> bool {
135 self.read_only
136 }
137
138 fn supports_trim(&self) -> bool {
139 self.remote.block_flags().contains(BlockDeviceFlag::TRIM_SUPPORT)
140 }
141}
142
impl<T> Drop for BlockDevice<T> {
    fn drop(&mut self) {
        // Neutralize the vmoid before it is dropped, discarding the raw id.  If `close()` was
        // already called this is a no-op (the vmoid was taken there).  NOTE(review): this
        // appears to exist because `VmoId` objects must not be dropped while still valid —
        // confirm against block_client.  The server-side VMO attachment, if any, is left for
        // connection teardown to clean up.
        let _ = self.vmoid.take().into_id();
    }
}
150
#[cfg(test)]
mod tests {
    use crate::Device;
    use crate::block_device::BlockDevice;
    use fake_block_client::FakeBlockClient;
    use zx::Status;

    #[fuchsia::test]
    async fn test_lifecycle() {
        let device =
            BlockDevice::new(FakeBlockClient::new(1024, 1024), false).await.expect("new failed");

        {
            let _buf = device.allocate_buffer(8192).await;
        }

        device.close().await.expect("Close failed");
    }

    #[fuchsia::test]
    async fn test_read_write_buffer() {
        let device =
            BlockDevice::new(FakeBlockClient::new(1024, 1024), false).await.expect("new failed");

        // Write two adjacent regions with distinct fill bytes...
        {
            let mut buf1 = device.allocate_buffer(8192).await;
            let mut buf2 = device.allocate_buffer(1024).await;
            buf1.as_mut_slice().fill(0xaa);
            buf2.as_mut_slice().fill(0xbb);
            device.write(65536, buf1.as_ref()).await.expect("Write failed");
            device.write(65536 + 8192, buf2.as_ref()).await.expect("Write failed");
        }
        // ...then read both back in a single operation and verify the contents.
        {
            let mut buf = device.allocate_buffer(8192 + 1024).await;
            device.read(65536, buf.as_mut()).await.expect("Read failed");
            assert_eq!(buf.as_slice()[..8192], vec![0xaau8; 8192]);
            assert_eq!(buf.as_slice()[8192..], vec![0xbbu8; 1024]);
        }

        device.close().await.expect("Close failed");
    }

    #[fuchsia::test]
    async fn test_read_only() {
        let device =
            BlockDevice::new(FakeBlockClient::new(1024, 1024), true).await.expect("new failed");
        let mut buf1 = device.allocate_buffer(8192).await;
        buf1.as_mut_slice().fill(0xaa);
        let err = device.write(65536, buf1.as_ref()).await.expect_err("Write succeeded");
        assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::ACCESS_DENIED);
    }

    #[fuchsia::test]
    async fn test_unaligned_access() {
        let device =
            BlockDevice::new(FakeBlockClient::new(1024, 1024), false).await.expect("new failed");
        let mut buf1 = device.allocate_buffer(device.block_size() as usize * 2).await;
        buf1.as_mut_slice().fill(0xaa);

        // Unaligned writes must fail: unaligned device offset, unaligned buffer start,
        // and unaligned buffer end, respectively.
        {
            let err = device.write(1, buf1.as_ref()).await.expect_err("Write succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err = device
                .write(0, buf1.subslice(1..(device.block_size() as usize + 1)))
                .await
                .expect_err("Write succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err = device
                .write(0, buf1.subslice(1..device.block_size() as usize))
                .await
                .expect_err("Write succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err = device
                .write(0, buf1.subslice(0..(device.block_size() as usize + 1)))
                .await
                .expect_err("Write succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }

        // Unaligned reads must fail in the same ways.
        {
            let err = device.read(1, buf1.as_mut()).await.expect_err("Read succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err = device
                .read(0, buf1.subslice_mut(1..(device.block_size() as usize + 1)))
                .await
                .expect_err("Read succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err = device
                .read(0, buf1.subslice_mut(1..device.block_size() as usize))
                .await
                .expect_err("Read succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err = device
                .read(0, buf1.subslice_mut(0..(device.block_size() as usize + 1)))
                .await
                .expect_err("Read succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }

        // Unaligned trims must fail too.
        {
            let err = device.trim(1..device.block_size() as u64).await.expect_err("Trim succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err =
                device.trim(1..(device.block_size() as u64 + 1)).await.expect_err("Trim succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err =
                device.trim(0..(device.block_size() as u64 + 1)).await.expect_err("Trim succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
    }
}