fake_block_client.rs

use block_client::{
    BlockClient, BufferSlice, MutableBufferSlice, ReadOptions, VmoId, WriteOptions,
};
use fidl_fuchsia_hardware_block as block;
use fuchsia_sync::Mutex;
use std::collections::BTreeMap;
use std::num::NonZero;
use std::ops::Range;
use std::sync::atomic::{self, AtomicU32};

type VmoRegistry = BTreeMap<u16, zx::Vmo>;

struct Inner {
    data: Vec<u8>,
    vmo_registry: VmoRegistry,
}

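/// An in-memory fake of [`BlockClient`] for tests: reads, writes, and trims operate on a plain
/// `Vec<u8>` guarded by a mutex, and attached VMOs are tracked in a simple id-keyed registry.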
pub struct FakeBlockClient {
    inner: Mutex<Inner>,
    block_size: u32,
    flush_count: AtomicU32,
}

impl FakeBlockClient {
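    /// Creates a fake device with `block_count` blocks of `block_size` bytes, initially zeroed.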
    pub fn new(block_size: u32, block_count: usize) -> Self {
        Self {
            inner: Mutex::new(Inner {
                data: vec![0u8; block_size as usize * block_count],
                vmo_registry: BTreeMap::new(),
            }),
            block_size,
            flush_count: AtomicU32::new(0),
        }
    }

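    /// Returns the number of times this client has been flushed.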
    pub fn flush_count(&self) -> u32 {
        self.flush_count.load(atomic::Ordering::Relaxed)
    }
}

impl BlockClient for FakeBlockClient {
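    // Registers a SLICE child of `vmo` under the lowest free non-zero id so that VmoId-based
    // buffer slices can be resolved by the read and write paths below.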
    async fn attach_vmo(&self, vmo: &zx::Vmo) -> Result<VmoId, zx::Status> {
        let len = vmo.get_size()?;
        let vmo = vmo.create_child(zx::VmoChildOptions::SLICE, 0, len)?;
        let mut inner = self.inner.lock();
        for id in 1..u16::MAX {
            if let std::collections::btree_map::Entry::Vacant(e) = inner.vmo_registry.entry(id) {
                e.insert(vmo);
                return Ok(VmoId::new(id));
            }
        }
        Err(zx::Status::NO_RESOURCES)
    }

    async fn detach_vmo(&self, vmo_id: VmoId) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock();
        let id = vmo_id.into_id();
        if inner.vmo_registry.remove(&id).is_none() { Err(zx::Status::NOT_FOUND) } else { Ok(()) }
    }

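    // Reads come straight from the in-memory `data` buffer: for a VmoId slice the bytes are
    // written into the registered VMO at `offset`, and for a memory slice they are copied into
    // the caller's buffer. The device offset (and, for VMO slices, offset and length) must be
    // block aligned.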
    async fn read_at_with_opts_traced(
        &self,
        buffer_slice: MutableBufferSlice<'_>,
        device_offset: u64,
        _opts: ReadOptions,
        _trace_flow_id: u64,
    ) -> Result<(), zx::Status> {
        if device_offset % self.block_size as u64 != 0 {
            return Err(zx::Status::INVALID_ARGS);
        }
        let device_offset = device_offset as usize;
        let inner = &mut *self.inner.lock();
        match buffer_slice {
            MutableBufferSlice::VmoId { vmo_id, offset, length } => {
                if offset % self.block_size as u64 != 0 {
                    return Err(zx::Status::INVALID_ARGS);
                }
                if length % self.block_size as u64 != 0 {
                    return Err(zx::Status::INVALID_ARGS);
                }
                let vmo = inner.vmo_registry.get(&vmo_id.id()).ok_or(zx::Status::INVALID_ARGS)?;
                vmo.write(&inner.data[device_offset..device_offset + length as usize], offset)?;
                Ok(())
            }
            MutableBufferSlice::Memory(slice) => {
                let len = slice.len();
                if device_offset + len > inner.data.len() {
                    return Err(zx::Status::OUT_OF_RANGE);
                }
                slice.copy_from_slice(&inner.data[device_offset..device_offset + len]);
                Ok(())
            }
        }
    }

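    // Writes mirror the read path: bytes are taken from the registered VMO or the caller's
    // memory slice and copied into the in-memory `data` buffer.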
    async fn write_at_with_opts_traced(
        &self,
        buffer_slice: BufferSlice<'_>,
        device_offset: u64,
        _opts: WriteOptions,
        _trace_flow_id: u64,
    ) -> Result<(), zx::Status> {
        if device_offset % self.block_size as u64 != 0 {
            return Err(zx::Status::INVALID_ARGS);
        }
        let device_offset = device_offset as usize;
        let inner = &mut *self.inner.lock();
        match buffer_slice {
            BufferSlice::VmoId { vmo_id, offset, length } => {
                if offset % self.block_size as u64 != 0 {
                    return Err(zx::Status::INVALID_ARGS);
                }
                if length % self.block_size as u64 != 0 {
                    return Err(zx::Status::INVALID_ARGS);
                }
                let vmo = inner.vmo_registry.get(&vmo_id.id()).ok_or(zx::Status::INVALID_ARGS)?;
                vmo.read(&mut inner.data[device_offset..device_offset + length as usize], offset)?;
                Ok(())
            }
            BufferSlice::Memory(slice) => {
                let len = slice.len();
                if device_offset + len > inner.data.len() {
                    return Err(zx::Status::OUT_OF_RANGE);
                }
                inner.data[device_offset..device_offset + len].copy_from_slice(slice);
                Ok(())
            }
        }
    }

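    // Trimmed ranges are filled with a 0xab pattern rather than zeroes, so reads of trimmed
    // data are distinguishable from legitimately zeroed blocks.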
    async fn trim_traced(&self, range: Range<u64>, _trace_flow_id: u64) -> Result<(), zx::Status> {
        if range.start % self.block_size as u64 != 0 {
            return Err(zx::Status::INVALID_ARGS);
        }
        if range.end % self.block_size as u64 != 0 {
            return Err(zx::Status::INVALID_ARGS);
        }
        let inner = &mut *self.inner.lock();
        if range.end as usize > inner.data.len() {
            return Err(zx::Status::OUT_OF_RANGE);
        }
        inner.data[range.start as usize..range.end as usize].fill(0xab);
        Ok(())
    }

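    // Flushing does not touch the data; it only bumps the counter reported by `flush_count`.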
    async fn flush_traced(&self, _trace_flow_id: u64) -> Result<(), zx::Status> {
        self.flush_count.fetch_add(1, atomic::Ordering::Relaxed);
        Ok(())
    }

    fn barrier(&self) {}

    async fn close(&self) -> Result<(), zx::Status> {
        Ok(())
    }

    fn block_size(&self) -> u32 {
        self.block_size
    }

    fn block_count(&self) -> u64 {
        self.inner.lock().data.len() as u64 / self.block_size as u64
    }

    fn max_transfer_blocks(&self) -> Option<NonZero<u32>> {
        None
    }

    fn block_flags(&self) -> block::Flag {
        block::Flag::TRIM_SUPPORT
    }

    fn is_connected(&self) -> bool {
        true
    }
}
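
// A minimal usage sketch, not part of the original file: it exercises the fake through the
// trait methods defined above. It assumes the `fuchsia` test attribute macro (or an equivalent
// async test runner) is available to this crate.
#[cfg(test)]
mod tests {
    use super::*;
    use block_client::BlockClient;

    #[fuchsia::test]
    async fn trim_fills_pattern_and_flush_bumps_counter() {
        let client = FakeBlockClient::new(512, 16);
        assert_eq!(client.block_size(), 512);
        assert_eq!(client.block_count(), 16);

        // Trim must be block aligned; this range covers blocks 2 and 3.
        client.trim_traced(1024..2048, 0).await.expect("trim failed");

        // Flushing is a no-op apart from the counter.
        client.flush_traced(0).await.expect("flush failed");
        assert_eq!(client.flush_count(), 1);
    }
}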