storage_device/ranged_device.rs

// Copyright 2025 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use crate::buffer::{BufferRef, MutableBufferRef};
use crate::buffer_allocator::BufferFuture;
use crate::{Device, ReadOptions, WriteOptions};
use anyhow::{Error, anyhow, bail, ensure};
use async_trait::async_trait;
use std::ops::Range;
use std::sync::Arc;

/// Wrapper around a Device where we can only access a region within it.
pub struct RangedDevice {
    // The underlying device.
    source: Arc<dyn Device>,
    // Range (in bytes) of the accessible region in `source`.
    range: Range<u64>,
}

impl RangedDevice {
    /// Creates a new RangedDevice covering the given byte range of the source device.
    /// The range must be block-aligned.
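    ///
    /// # Example
    ///
    /// A minimal sketch (marked `ignore`, so it is not compiled as a doc test); `source` stands
    /// in for however the caller obtained an `Arc<dyn Device>`, e.g. the `FakeDevice` used in
    /// the tests below.
    ///
    /// ```ignore
    /// // Expose only blocks 1..4 (three blocks) of `source` as a device in its own right.
    /// let block_size = source.block_size() as u64;
    /// let ranged = RangedDevice::new(source.clone(), block_size..4 * block_size)?;
    /// assert_eq!(ranged.block_count(), 3);
    /// ```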
    pub fn new(source: Arc<dyn Device>, range: Range<u64>) -> Result<Self, Error> {
        let block_size = source.block_size() as u64;
        ensure!(range.start % block_size == 0, "range.start must be block aligned");
        ensure!(range.end % block_size == 0, "range.end must be block aligned");
        ensure!(
            range.end <= source.block_count() * block_size,
            "failed to create RangedDevice (out of range)"
        );
        ensure!(range.end > range.start, "failed to create RangedDevice (no size)");
        Ok(Self { source: source.clone(), range })
    }

    fn num_blocks(&self) -> u64 {
        (self.range.end - self.range.start) / self.block_size() as u64
    }
}

#[async_trait]
impl Device for RangedDevice {
    fn allocate_buffer(&self, size: usize) -> BufferFuture<'_> {
        self.source.allocate_buffer(size)
    }

    fn block_size(&self) -> u32 {
        self.source.block_size()
    }

    fn block_count(&self) -> u64 {
        self.num_blocks()
    }

    async fn read_with_opts(
        &self,
        offset: u64,
        buffer: MutableBufferRef<'_>,
        _read_opts: ReadOptions,
    ) -> Result<(), Error> {
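        // Translate the caller-relative offset into an absolute offset on the source device,
        // guarding against overflow and against reads extending past the end of the range.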
        let adjusted_offset = self
            .range
            .start
            .checked_add(offset)
            .ok_or_else(|| anyhow!("arithmetic overflow calculating offset"))?;
        ensure!(
            adjusted_offset + buffer.len() as u64 <= self.range.end,
            "reading past end of device"
        );
        self.source.read(adjusted_offset, buffer).await
    }

    async fn write_with_opts(
        &self,
        offset: u64,
        buffer: BufferRef<'_>,
        opts: WriteOptions,
    ) -> Result<(), Error> {
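        // As with reads, shift the offset into the source device's address space and reject
        // writes that would extend past the end of the range.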
        let adjusted_offset = self
            .range
            .start
            .checked_add(offset)
            .ok_or_else(|| anyhow!("arithmetic overflow calculating offset"))?;
        ensure!(
            adjusted_offset + buffer.len() as u64 <= self.range.end,
            "writing past end of device"
        );
        self.source.write_with_opts(adjusted_offset, buffer, opts).await
    }

    async fn trim(&self, _range: Range<u64>) -> Result<(), Error> {
        bail!("RangedDevice does not support trim");
    }

    async fn close(&self) -> Result<(), Error> {
        self.source.close().await
    }

    async fn flush(&self) -> Result<(), Error> {
        self.source.flush().await
    }

    fn barrier(&self) {
        self.source.barrier()
    }

    fn is_read_only(&self) -> bool {
        self.source.is_read_only()
    }

    fn supports_trim(&self) -> bool {
        false
    }

    fn reopen(&self, read_only: bool) {
        self.source.reopen(read_only)
    }
}

#[cfg(test)]
mod tests {
    use super::RangedDevice;
    use crate::Device;
    use crate::fake_device::FakeDevice;
    use std::sync::Arc;

    #[fuchsia::test]
    async fn test_ranged_device_reads() {
        const BLOCK_SIZE: usize = 512;
        let device = Arc::new(FakeDevice::new(8, BLOCK_SIZE as u32));

        let mut buffer = device.allocate_buffer(BLOCK_SIZE).await;
        buffer.as_mut_slice().copy_from_slice(&[1; 512]);
        device.write(BLOCK_SIZE as u64, buffer.as_ref()).await.expect("failed to write to device");

        buffer.as_mut_slice().copy_from_slice(&[2; 512]);
        device
            .write(2 * BLOCK_SIZE as u64, buffer.as_ref())
            .await
            .expect("failed to write to device");

        // Create a RangedDevice starting from block offset one, for three blocks.
        let sub_device =
            RangedDevice::new(device.clone(), BLOCK_SIZE as u64..4 * BLOCK_SIZE as u64)
                .expect("failed to create new RangedDevice");

        // Test reading from the RangedDevice.
        let mut ranged_device_buffer = sub_device.allocate_buffer(BLOCK_SIZE).await;
        sub_device
            .read(0, ranged_device_buffer.as_mut())
            .await
            .expect("failed to read from RangedDevice");
        assert_eq!(ranged_device_buffer.as_slice(), [1; 512]);

        sub_device
            .read(BLOCK_SIZE as u64, ranged_device_buffer.as_mut())
            .await
            .expect("failed to read from RangedDevice");
        assert_eq!(ranged_device_buffer.as_slice(), [2; 512]);

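        // Block 2 of the ranged device maps to block 3 of the underlying device, which was
        // never written, so it should read back as zeroes.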
        sub_device
            .read(2 * BLOCK_SIZE as u64, ranged_device_buffer.as_mut())
            .await
            .expect("failed to read from RangedDevice");
        assert_eq!(ranged_device_buffer.as_slice(), [0; 512]);

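        // Offset 3 blocks is exactly the end of the three-block range, so this read must fail.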
        sub_device
            .read(3 * BLOCK_SIZE as u64, ranged_device_buffer.as_mut())
            .await
            .expect_err("unexpectedly passed reading out of range of RangedDevice");
    }

    #[fuchsia::test]
    async fn test_ranged_device_writes() {
        const BLOCK_SIZE: usize = 512;
        let device = Arc::new(FakeDevice::new(8, BLOCK_SIZE as u32));

        // Create a RangedDevice starting from block offset one, for three blocks.
        let block_offset = 1;
        let sub_device = RangedDevice::new(
            device.clone(),
            block_offset * BLOCK_SIZE as u64..(block_offset + 3) * BLOCK_SIZE as u64,
        )
        .expect("failed to create new RangedDevice");

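        // A four-block buffer cannot fit in the three-block range, so this write must fail.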
        let mut invalid_buffer = sub_device.allocate_buffer(4 * BLOCK_SIZE).await;
        invalid_buffer.as_mut_slice().copy_from_slice(&[3; 2048]);
        sub_device
            .write(0, invalid_buffer.as_ref())
            .await
            .expect_err("unexpectedly passed writing a buffer that is too big");

        let mut write_buffer = sub_device.allocate_buffer(BLOCK_SIZE).await;
        write_buffer.as_mut_slice().copy_from_slice(&[3; 512]);
        let write_block_offset = 2;
        sub_device
            .write(write_block_offset * BLOCK_SIZE as u64, write_buffer.as_ref())
            .await
            .expect("failed to write to RangedDevice");

        // Verify the write on the underlying device.
        let mut read_buffer = device.allocate_buffer(BLOCK_SIZE).await;
        device
            .read((block_offset + write_block_offset) * BLOCK_SIZE as u64, read_buffer.as_mut())
            .await
            .expect("failed to read from device");
        assert_eq!(read_buffer.as_slice(), [3; 512]);
    }
}