// storage_device/block_device.rs

// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4
5use crate::Device;
6use crate::buffer::{BufferFuture, BufferRef, MutableBufferRef};
7use crate::buffer_allocator::{BufferAllocator, BufferSource};
8use anyhow::{Error, bail, ensure};
9use async_trait::async_trait;
10use block_client::{
11    BlockClient, BlockFlags, BufferSlice, MutableBufferSlice, ReadOptions, VmoId, WriteOptions,
12};
13use std::ops::Range;
14use zx::Status;
15
16/// BlockDevice is an implementation of Device backed by a real block device behind a FIFO.
/// BlockDevice is an implementation of Device backed by a real block device behind a FIFO.
pub struct BlockDevice<T> {
    // Hands out transfer buffers carved out of the VMO registered with the block device.
    allocator: BufferAllocator,
    // The underlying connection to the block device.
    remote: T,
    // When true, `write_with_opts` and `trim` fail with ACCESS_DENIED.
    read_only: bool,
    // Id the block device assigned to the transfer VMO; released in `close`/`drop`.
    vmoid: VmoId,
}
23
24const TRANSFER_VMO_SIZE: usize = 128 * 1024 * 1024;
25
26impl<T: BlockClient> BlockDevice<T> {
27    /// Creates a new BlockDevice over |remote|.
28    pub async fn new(remote: T, read_only: bool) -> Result<Self, Error> {
29        let buffer_source = BufferSource::new(TRANSFER_VMO_SIZE);
30        let vmoid = remote.attach_vmo(buffer_source.vmo()).await?;
31        let allocator = BufferAllocator::new(remote.block_size() as usize, buffer_source);
32        Ok(Self { allocator, remote, read_only, vmoid })
33    }
34}
35
36#[async_trait]
37impl<T: BlockClient> Device for BlockDevice<T> {
38    fn allocate_buffer(&self, size: usize) -> BufferFuture<'_> {
39        self.allocator.allocate_buffer(size)
40    }
41
42    fn block_size(&self) -> u32 {
43        self.remote.block_size()
44    }
45
46    fn block_count(&self) -> u64 {
47        self.remote.block_count()
48    }
49
50    async fn read_with_opts(
51        &self,
52        offset: u64,
53        buffer: MutableBufferRef<'_>,
54        read_opts: ReadOptions,
55    ) -> Result<(), Error> {
56        if buffer.len() == 0 {
57            return Ok(());
58        }
59        ensure!(self.vmoid.is_valid(), Status::INVALID_ARGS);
60        ensure!(offset % (self.block_size() as u64) == 0, Status::INVALID_ARGS);
61        ensure!(buffer.range().start % (self.block_size() as usize) == 0, Status::INVALID_ARGS);
62        ensure!(buffer.range().end % (self.block_size() as usize) == 0, Status::INVALID_ARGS);
63        Ok(self
64            .remote
65            .read_at_with_opts(
66                MutableBufferSlice::new_with_vmo_id(
67                    &self.vmoid,
68                    buffer.range().start as u64,
69                    buffer.len() as u64,
70                ),
71                offset,
72                read_opts,
73            )
74            .await?)
75    }
76
77    async fn write_with_opts(
78        &self,
79        offset: u64,
80        buffer: BufferRef<'_>,
81        opts: WriteOptions,
82    ) -> Result<(), Error> {
83        if self.read_only {
84            bail!(Status::ACCESS_DENIED);
85        }
86        if buffer.len() == 0 {
87            return Ok(());
88        }
89        ensure!(self.vmoid.is_valid(), "Device is closed");
90        ensure!(offset % (self.block_size() as u64) == 0, Status::INVALID_ARGS);
91        ensure!(buffer.range().start % (self.block_size() as usize) == 0, Status::INVALID_ARGS);
92        ensure!(buffer.range().end % (self.block_size() as usize) == 0, Status::INVALID_ARGS);
93        Ok(self
94            .remote
95            .write_at_with_opts(
96                BufferSlice::new_with_vmo_id(
97                    &self.vmoid,
98                    buffer.range().start as u64,
99                    buffer.len() as u64,
100                ),
101                offset,
102                opts,
103            )
104            .await?)
105    }
106
107    async fn trim(&self, range: Range<u64>) -> Result<(), Error> {
108        if self.read_only {
109            bail!(Status::ACCESS_DENIED);
110        }
111        ensure!(range.start % (self.block_size() as u64) == 0, Status::INVALID_ARGS);
112        ensure!(range.end % (self.block_size() as u64) == 0, Status::INVALID_ARGS);
113        Ok(self.remote.trim(range).await?)
114    }
115
116    async fn close(&self) -> Result<(), Error> {
117        // We can leak the VMO id because we are closing the device.
118        let _ = self.vmoid.take().into_id();
119        Ok(self.remote.close().await?)
120    }
121
122    async fn flush(&self) -> Result<(), Error> {
123        Ok(self.remote.flush().await?)
124    }
125
126    fn barrier(&self) {
127        self.remote.barrier()
128    }
129
130    fn is_read_only(&self) -> bool {
131        self.read_only
132    }
133
134    fn supports_trim(&self) -> bool {
135        self.remote.block_flags().contains(BlockFlags::TRIM_SUPPORT)
136    }
137}
138
impl<T> Drop for BlockDevice<T> {
    fn drop(&mut self) {
        // We can't detach the VmoId because we're not async here, but we are tearing down the
        // connection to the block device so we don't really need to.
        // `take()` swaps the stored id for an invalid one and `into_id()` consumes the result,
        // presumably so VmoId's own drop handling doesn't flag the id as leaked —
        // TODO(review): confirm VmoId's drop-time behavior for a still-valid id.
        let _ = self.vmoid.take().into_id();
    }
}
146
#[cfg(test)]
mod tests {
    use crate::Device;
    use crate::block_device::BlockDevice;
    use fake_block_client::FakeBlockClient;
    use zx::Status;

    #[fuchsia::test]
    async fn test_lifecycle() {
        let device =
            BlockDevice::new(FakeBlockClient::new(1024, 1024), false).await.expect("new failed");

        {
            // Allocating and dropping a buffer must not interfere with closing the device.
            let _buf = device.allocate_buffer(8192).await;
        }

        device.close().await.expect("Close failed");
    }

    #[fuchsia::test]
    async fn test_read_write_buffer() {
        let device =
            BlockDevice::new(FakeBlockClient::new(1024, 1024), false).await.expect("new failed");

        {
            let mut buf1 = device.allocate_buffer(8192).await;
            let mut buf2 = device.allocate_buffer(1024).await;
            buf1.as_mut_slice().fill(0xaa);
            buf2.as_mut_slice().fill(0xbb);
            device.write(65536, buf1.as_ref()).await.expect("Write failed");
            device.write(65536 + 8192, buf2.as_ref()).await.expect("Write failed");
        }
        {
            // Read back both writes with a single request and verify the contents.
            let mut buf = device.allocate_buffer(8192 + 1024).await;
            device.read(65536, buf.as_mut()).await.expect("Read failed");
            assert_eq!(buf.as_slice()[..8192], vec![0xaau8; 8192]);
            assert_eq!(buf.as_slice()[8192..], vec![0xbbu8; 1024]);
        }

        device.close().await.expect("Close failed");
    }

    #[fuchsia::test]
    async fn test_read_only() {
        let device =
            BlockDevice::new(FakeBlockClient::new(1024, 1024), true).await.expect("new failed");
        let mut buf1 = device.allocate_buffer(8192).await;
        buf1.as_mut_slice().fill(0xaa);
        let err = device.write(65536, buf1.as_ref()).await.expect_err("Write succeeded");
        assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::ACCESS_DENIED);
    }

    #[fuchsia::test]
    async fn test_unaligned_access() {
        let device =
            BlockDevice::new(FakeBlockClient::new(1024, 1024), false).await.expect("new failed");
        let mut buf1 = device.allocate_buffer(device.block_size() as usize * 2).await;
        buf1.as_mut_slice().fill(0xaa);

        // Write checks: unaligned device offset, then unaligned buffer start/end.
        {
            let err = device.write(1, buf1.as_ref()).await.expect_err("Write succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err = device
                .write(0, buf1.subslice(1..(device.block_size() as usize + 1)))
                .await
                .expect_err("Write succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err = device
                .write(0, buf1.subslice(1..device.block_size() as usize))
                .await
                .expect_err("Write succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err = device
                .write(0, buf1.subslice(0..(device.block_size() as usize + 1)))
                .await
                .expect_err("Write succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }

        // Read checks: same alignment cases as the writes above.
        {
            let err = device.read(1, buf1.as_mut()).await.expect_err("Read succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err = device
                .read(0, buf1.subslice_mut(1..(device.block_size() as usize + 1)))
                .await
                .expect_err("Read succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err = device
                .read(0, buf1.subslice_mut(1..device.block_size() as usize))
                .await
                .expect_err("Read succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err = device
                .read(0, buf1.subslice_mut(0..(device.block_size() as usize + 1)))
                .await
                .expect_err("Read succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }

        // Trim checks: unaligned start, both ends, and unaligned end.
        {
            let err = device.trim(1..device.block_size() as u64).await.expect_err("Trim succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err =
                device.trim(1..(device.block_size() as u64 + 1)).await.expect_err("Trim succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err =
                device.trim(0..(device.block_size() as u64 + 1)).await.expect_err("Trim succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
    }
}
276}