use async_trait::async_trait;
use block_client::{BlockClient, BufferSlice, MutableBufferSlice, VmoId, WriteOptions};
use fidl_fuchsia_hardware_block as block;
use std::collections::BTreeMap;
use std::ops::Range;
use std::sync::atomic::{self, AtomicU32};
use std::sync::Mutex;

type VmoRegistry = BTreeMap<u16, zx::Vmo>;

struct Inner {
    data: Vec<u8>,
    vmo_registry: VmoRegistry,
}

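/// An in-memory fake implementation of [`BlockClient`], backed by a `Vec<u8>`, for use in tests.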
pub struct FakeBlockClient {
    inner: Mutex<Inner>,
    block_size: u32,
    flush_count: AtomicU32,
}

impl FakeBlockClient {
    pub fn new(block_size: u32, block_count: usize) -> Self {
        Self {
            inner: Mutex::new(Inner {
                data: vec![0u8; block_size as usize * block_count],
                vmo_registry: BTreeMap::new(),
            }),
            block_size,
            flush_count: AtomicU32::new(0),
        }
    }

    pub fn flush_count(&self) -> u32 {
        self.flush_count.load(atomic::Ordering::Relaxed)
    }
}

#[async_trait]
impl BlockClient for FakeBlockClient {
    async fn attach_vmo(&self, vmo: &zx::Vmo) -> Result<VmoId, zx::Status> {
        let len = vmo.get_size()?;
        let vmo = vmo.create_child(zx::VmoChildOptions::SLICE, 0, len)?;
        let mut inner = self.inner.lock().unwrap();
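        // Hand out the lowest id not already in the registry, starting from 1.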
        for id in 1..u16::MAX {
            if let std::collections::btree_map::Entry::Vacant(e) = inner.vmo_registry.entry(id) {
                e.insert(vmo);
                return Ok(VmoId::new(id));
            }
        }
        Err(zx::Status::NO_RESOURCES)
    }

    async fn detach_vmo(&self, vmo_id: VmoId) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().unwrap();
        let id = vmo_id.into_id();
        if inner.vmo_registry.remove(&id).is_none() {
            Err(zx::Status::NOT_FOUND)
        } else {
            Ok(())
        }
    }

    async fn read_at_traced(
        &self,
        buffer_slice: MutableBufferSlice<'_>,
        device_offset: u64,
        _trace_flow_id: u64,
    ) -> Result<(), zx::Status> {
        if device_offset % self.block_size as u64 != 0 {
            return Err(zx::Status::INVALID_ARGS);
        }
        let device_offset = device_offset as usize;
        let inner = &mut *self.inner.lock().unwrap();
        match buffer_slice {
            MutableBufferSlice::VmoId { vmo_id, offset, length } => {
                if offset % self.block_size as u64 != 0 {
                    return Err(zx::Status::INVALID_ARGS);
                }
                if length % self.block_size as u64 != 0 {
                    return Err(zx::Status::INVALID_ARGS);
                }
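                // A read copies from the fake device contents into the caller's registered VMO.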
                let vmo = inner.vmo_registry.get(&vmo_id.id()).ok_or(zx::Status::INVALID_ARGS)?;
                vmo.write(&inner.data[device_offset..device_offset + length as usize], offset)?;
                Ok(())
            }
            MutableBufferSlice::Memory(slice) => {
                let len = slice.len();
                if device_offset + len > inner.data.len() {
                    return Err(zx::Status::OUT_OF_RANGE);
                }
                slice.copy_from_slice(&inner.data[device_offset..device_offset + len]);
                Ok(())
            }
        }
    }

    async fn write_at_with_opts_traced(
        &self,
        buffer_slice: BufferSlice<'_>,
        device_offset: u64,
        _opts: WriteOptions,
        _trace_flow_id: u64,
    ) -> Result<(), zx::Status> {
        if device_offset % self.block_size as u64 != 0 {
            return Err(zx::Status::INVALID_ARGS);
        }
        let device_offset = device_offset as usize;
        let inner = &mut *self.inner.lock().unwrap();
        match buffer_slice {
            BufferSlice::VmoId { vmo_id, offset, length } => {
                if offset % self.block_size as u64 != 0 {
                    return Err(zx::Status::INVALID_ARGS);
                }
                if length % self.block_size as u64 != 0 {
                    return Err(zx::Status::INVALID_ARGS);
                }
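                // A write copies from the caller's registered VMO into the fake device contents.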
                let vmo = inner.vmo_registry.get(&vmo_id.id()).ok_or(zx::Status::INVALID_ARGS)?;
                vmo.read(&mut inner.data[device_offset..device_offset + length as usize], offset)?;
                Ok(())
            }
            BufferSlice::Memory(slice) => {
                let len = slice.len();
                if device_offset + len > inner.data.len() {
                    return Err(zx::Status::OUT_OF_RANGE);
                }
                inner.data[device_offset..device_offset + len].copy_from_slice(slice);
                Ok(())
            }
        }
    }

    async fn trim_traced(&self, range: Range<u64>, _trace_flow_id: u64) -> Result<(), zx::Status> {
        if range.start % self.block_size as u64 != 0 {
            return Err(zx::Status::INVALID_ARGS);
        }
        if range.end % self.block_size as u64 != 0 {
            return Err(zx::Status::INVALID_ARGS);
        }
        let inner = &mut *self.inner.lock().unwrap();
        if range.end as usize > inner.data.len() {
            return Err(zx::Status::OUT_OF_RANGE);
        }
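        // Trimmed ranges are overwritten with a 0xab fill pattern.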
        inner.data[range.start as usize..range.end as usize].fill(0xab);
        Ok(())
    }

    async fn flush_traced(&self, _trace_flow_id: u64) -> Result<(), zx::Status> {
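        // There is no backing store to flush; just record that a flush was requested.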
        self.flush_count.fetch_add(1, atomic::Ordering::Relaxed);
        Ok(())
    }

    async fn close(&self) -> Result<(), zx::Status> {
        Ok(())
    }

    fn block_size(&self) -> u32 {
        self.block_size
    }

    fn block_count(&self) -> u64 {
        self.inner.lock().unwrap().data.len() as u64 / self.block_size as u64
    }

    fn block_flags(&self) -> block::Flag {
        block::Flag::TRIM_SUPPORT
    }

    fn is_connected(&self) -> bool {
        true
    }
}
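
#[cfg(test)]
mod usage_sketch {
    // Illustrative sketch only, not part of the original file: a minimal test showing how
    // `FakeBlockClient` might be driven through the traced `BlockClient` methods implemented
    // above. It assumes the `fuchsia_async` test executor is available as a dev-dependency;
    // the module and test names here are made up for this example.
    use super::*;

    #[fuchsia_async::run_singlethreaded(test)]
    async fn read_trim_and_flush() {
        let client = FakeBlockClient::new(512, 8);
        assert_eq!(client.block_size(), 512);
        assert_eq!(client.block_count(), 8);

        // A freshly created fake device reads back as zeroes.
        let mut buf = [0xffu8; 512];
        client.read_at_traced(MutableBufferSlice::Memory(&mut buf[..]), 0, 0).await.unwrap();
        assert_eq!(buf, [0u8; 512]);

        // Trimming fills the range with the 0xab pattern.
        client.trim_traced(512..1024, 0).await.unwrap();
        client.read_at_traced(MutableBufferSlice::Memory(&mut buf[..]), 512, 0).await.unwrap();
        assert_eq!(buf, [0xabu8; 512]);

        // Flushes are only counted.
        client.flush_traced(0).await.unwrap();
        assert_eq!(client.flush_count(), 1);
    }
}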