1#![deny(missing_docs)]
8
9use anyhow::{Context as _, Result, anyhow};
10use async_trait::async_trait;
11use fidl::HandleBased as _;
12use fidl_fuchsia_blackout_test::{ControllerRequest, ControllerRequestStream};
13use fidl_fuchsia_device::ControllerMarker;
14use fidl_fuchsia_io as fio;
15use fidl_fuchsia_storage_partitions as fpartitions;
16use fs_management::filesystem::BlockConnector;
17use fuchsia_async as fasync;
18use fuchsia_component::client::{Service, connect_to_protocol, connect_to_protocol_at_path};
19use fuchsia_component::server::{ServiceFs, ServiceObj};
20use fuchsia_fs::directory::readdir;
21use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future};
22use rand::distr::StandardUniform;
23use rand::rngs::StdRng;
24use rand::{Rng, SeedableRng};
25use std::pin::pin;
26use std::sync::Arc;
27use storage_isolated_driver_manager::{
28 BlockDeviceMatcher, Guid, create_random_guid, find_block_device, into_guid,
29};
30
/// Test workload built from a generated sequence of random operations (see module for details).
pub mod random_op;
/// Test workload built from a statically-defined tree structure (see module for details).
pub mod static_tree;
33
/// The three-phase contract a blackout test implements. Each phase is invoked
/// by the host-side harness via the `fuchsia.blackout.test.Controller` FIDL
/// protocol, typically with a power cut (the "blackout") between `test` and
/// `verify`.
#[async_trait]
pub trait Test {
    /// Prepare the target device for the test (e.g. format it). Called once
    /// before `test`.
    ///
    /// `device_label` names the partition to operate on; `device_path`, when
    /// provided, pins the test to a specific device path; `seed` makes any
    /// randomized setup reproducible.
    async fn setup(
        self: Arc<Self>,
        device_label: String,
        device_path: Option<String>,
        seed: u64,
    ) -> Result<()>;
    /// Run the workload under test. This may be interrupted at any point by
    /// the harness cutting power, so it should continuously exercise the
    /// device rather than run to a clean finish.
    async fn test(
        self: Arc<Self>,
        device_label: String,
        device_path: Option<String>,
        seed: u64,
    ) -> Result<()>;
    /// Check the device's state after the blackout. An `Err` here indicates
    /// the on-device state failed verification (e.g. filesystem corruption).
    async fn verify(
        self: Arc<Self>,
        device_label: String,
        device_path: Option<String>,
        seed: u64,
    ) -> Result<()>;
}
59
60struct BlackoutController(ControllerRequestStream);
61
/// Component-side server for a blackout test: exposes the
/// `fuchsia.blackout.test.Controller` protocol from the outgoing directory and
/// forwards incoming requests to the wrapped [`Test`] implementation.
pub struct TestServer<'a, T> {
    // The outgoing ServiceFs carrying Controller connections.
    fs: ServiceFs<ServiceObj<'a, BlackoutController>>,
    // Shared test implementation; cloned into each connection handler.
    test: Arc<T>,
}
67
impl<'a, T> TestServer<'a, T>
where
    T: Test + 'static,
{
    /// Create a new server wrapping `test`, registering the Controller
    /// protocol in `svc/` and taking over this component's outgoing
    /// directory handle.
    ///
    /// # Errors
    ///
    /// Returns an error if the outgoing directory handle can't be taken
    /// (e.g. it was already claimed).
    pub fn new(test: T) -> Result<TestServer<'a, T>> {
        let mut fs = ServiceFs::new();
        fs.dir("svc").add_fidl_service(BlackoutController);
        fs.take_and_serve_directory_handle()?;

        Ok(TestServer { fs, test: Arc::new(test) })
    }

    /// Serve Controller connections until the outgoing directory closes.
    /// Connections are handled concurrently (bounded below); per-connection
    /// errors are logged rather than propagated so one bad client can't take
    /// the server down.
    pub async fn serve(self) {
        const MAX_CONCURRENT: usize = 10_000;
        let test = self.test;
        self.fs
            .for_each_concurrent(MAX_CONCURRENT, move |stream| {
                handle_request(test.clone(), stream).unwrap_or_else(|e| log::error!("{}", e))
            })
            .await;
    }
}
92
93async fn handle_request<T: Test + 'static>(
94 test: Arc<T>,
95 BlackoutController(mut stream): BlackoutController,
96) -> Result<()> {
97 while let Some(request) = stream.try_next().await? {
98 handle_controller(test.clone(), request).await?;
99 }
100
101 Ok(())
102}
103
104async fn handle_controller<T: Test + 'static>(
105 test: Arc<T>,
106 request: ControllerRequest,
107) -> Result<()> {
108 match request {
109 ControllerRequest::Setup { responder, device_label, device_path, seed } => {
110 let res = test.setup(device_label, device_path, seed).await.map_err(|err| {
111 log::error!(err:?; "Setup failed");
112 zx::Status::INTERNAL.into_raw()
113 });
114 responder.send(res)?;
115 }
116 ControllerRequest::Test { responder, device_label, device_path, seed, duration } => {
117 let test_fut = test.test(device_label, device_path, seed).map_err(|err| {
118 log::error!(err:?; "Test failed");
119 zx::Status::INTERNAL.into_raw()
120 });
121 if duration != 0 {
122 log::info!("starting test and replying in {} seconds...", duration);
125 let timer = pin!(fasync::Timer::new(std::time::Duration::from_secs(duration)));
126 let res = match future::select(test_fut, timer).await {
127 future::Either::Left((res, _)) => res,
128 future::Either::Right((_, test_fut)) => {
129 fasync::Task::spawn(test_fut.map(|_| ())).detach();
130 Ok(())
131 }
132 };
133 responder.send(res)?;
134 } else {
135 log::info!("starting test...");
137 responder.send(test_fut.await)?;
138 }
139 }
140 ControllerRequest::Verify { responder, device_label, device_path, seed } => {
141 let res = test.verify(device_label, device_path, seed).await.map_err(|e| {
142 log::warn!("{:?}", e);
144 zx::Status::BAD_STATE.into_raw()
145 });
146 responder.send(res)?;
147 }
148 }
149
150 Ok(())
151}
152
153pub fn generate_content(seed: u64) -> Vec<u8> {
155 let mut rng = StdRng::seed_from_u64(seed);
156
157 let size = rng.random_range(1..1 << 16);
158 rng.sample_iter(&StandardUniform).take(size).collect()
159}
160
161pub async fn find_dev(dev: &str) -> Result<String> {
164 let dev_class_block =
165 fuchsia_fs::directory::open_in_namespace("/dev/class/block", fio::PERM_READABLE)?;
166 for entry in readdir(&dev_class_block).await? {
167 let path = format!("/dev/class/block/{}", entry.name);
168 let proxy = connect_to_protocol_at_path::<ControllerMarker>(&path)?;
169 let topo_path = proxy.get_topological_path().await?.map_err(|s| zx::Status::from_raw(s))?;
170 log::info!("{} => {}", path, topo_path);
171 if dev == topo_path {
172 return Ok(path);
173 }
174 }
175 Err(anyhow::anyhow!("Couldn't find {} in /dev/class/block", dev))
176}
177
178pub fn dev() -> fio::DirectoryProxy {
180 fuchsia_fs::directory::open_in_namespace("/dev", fio::PERM_READABLE)
181 .expect("failed to open /dev")
182}
183
// Type GUID stamped on GPT partitions created by blackout tests, so they can
// be recognized as test-owned.
const BLACKOUT_TYPE_GUID: &Guid = &[
    0x68, 0x45, 0x23, 0x01, 0xab, 0x89, 0xef, 0xcd, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
];

// Size in bytes (60 MiB) of the partition created by `set_up_partition`.
const GPT_PARTITION_SIZE: u64 = 60 * 1024 * 1024;
191
192pub async fn set_up_partition(device_label: String) -> Result<Box<dyn BlockConnector>> {
196 let partitions = Service::open(fpartitions::PartitionServiceMarker).unwrap();
197 let manager = connect_to_protocol::<fpartitions::PartitionsManagerMarker>().unwrap();
198
199 let service_instances =
200 partitions.clone().enumerate().await.expect("Failed to enumerate partitions");
201 if let Some(connector) =
202 find_block_device(&[BlockDeviceMatcher::Name(&device_label)], service_instances.into_iter())
203 .await
204 .context("Failed to find block device")?
205 {
206 log::info!(device_label:%; "found existing partition");
207 Ok(Box::new(connector))
208 } else {
209 log::info!(device_label:%; "adding new partition to the system gpt");
210 let info =
211 manager.get_block_info().await.expect("FIDL error").expect("get_block_info failed");
212 let transaction = manager
213 .create_transaction()
214 .await
215 .expect("FIDL error")
216 .map_err(zx::Status::from_raw)
217 .expect("create_transaction failed");
218 let request = fpartitions::PartitionsManagerAddPartitionRequest {
219 transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
220 name: Some(device_label.clone()),
221 type_guid: Some(into_guid(BLACKOUT_TYPE_GUID.clone())),
222 instance_guid: Some(into_guid(create_random_guid())),
223 num_blocks: Some(GPT_PARTITION_SIZE / info.1 as u64),
224 ..Default::default()
225 };
226 manager
227 .add_partition(request)
228 .await
229 .expect("FIDL error")
230 .map_err(zx::Status::from_raw)
231 .expect("add_partition failed");
232 manager
233 .commit_transaction(transaction)
234 .await
235 .expect("FIDL error")
236 .map_err(zx::Status::from_raw)
237 .expect("add_partition failed");
238 let service_instances =
239 partitions.enumerate().await.expect("Failed to enumerate partitions");
240 let connector = find_block_device(
241 &[BlockDeviceMatcher::Name(&device_label)],
242 service_instances.into_iter(),
243 )
244 .await
245 .context("Failed to find block device")?
246 .unwrap();
247 Ok(Box::new(connector))
248 }
249}
250
251pub async fn find_partition(device_label: String) -> Result<Box<dyn BlockConnector>> {
253 let partitions = Service::open(fpartitions::PartitionServiceMarker).unwrap();
254 let service_instances = partitions.enumerate().await.expect("Failed to enumerate partitions");
255 let connector = find_block_device(
256 &[BlockDeviceMatcher::Name(&device_label)],
257 service_instances.into_iter(),
258 )
259 .await
260 .context("Failed to find block device")?
261 .ok_or_else(|| anyhow!("Block device not found"))?;
262 log::info!(device_label:%; "found existing partition");
263 Ok(Box::new(connector))
264}