#![deny(missing_docs)]
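//! Tools for serializing the contents of a directory into the FactoryFS on-disk format and
//! writing the resulting image to a block device.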

use anyhow::{bail, Context, Error};
use block_client::cache::Cache;
use block_client::RemoteBlockClientSync;
use byteorder::{LittleEndian, WriteBytesExt};
use fidl_fuchsia_hardware_block::BlockMarker;
use fidl_fuchsia_io as fio;
use fuchsia_fs::directory::{readdir_recursive, DirEntry, DirentKind};
use futures::StreamExt;
use std::io::Write;

const FACTORYFS_MAGIC: u64 = 0xa55d3ff91e694d21;
const BLOCK_SIZE: u32 = 4096;
const DIRENT_START_BLOCK: u32 = 1;
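// Number of bytes `FactoryFS::serialize_superblock` writes before padding out to a full block.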
const SUPERBLOCK_DATA_SIZE: u32 = 52;
const FACTORYFS_MAJOR_VERSION: u32 = 1;
const FACTORYFS_MINOR_VERSION: u32 = 0;

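/// Rounds `x` up to the nearest multiple of `align`, which must be a nonzero power of two.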
fn round_up_to_align(x: u32, align: u32) -> u32 {
    debug_assert_ne!(align, 0);
    debug_assert_eq!(align & (align - 1), 0);
    (x + align - 1) & !(align - 1)
}

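/// Returns the number of `BLOCK_SIZE`-sized blocks needed to hold `bytes` bytes.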
fn num_blocks(bytes: u32) -> u32 {
    (bytes + BLOCK_SIZE - 1) / BLOCK_SIZE
}

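/// Rounds `bytes` up to the nearest multiple of `BLOCK_SIZE`.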
fn round_up_to_block_size(bytes: u32) -> u32 {
    num_blocks(bytes) * BLOCK_SIZE
}

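/// Pads `writer` with zero bytes so that a region that started on a block boundary and is
/// currently `written_bytes` long ends on a block boundary.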
fn block_align<Writer>(writer: &mut Writer, written_bytes: u32) -> Result<(), Error>
where
    Writer: Write,
{
    let fill = round_up_to_block_size(written_bytes) - written_bytes;
    for _ in 0..fill {
        writer.write_u8(0)?;
    }
    Ok(())
}

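/// In-memory description of a FactoryFS image: format metadata plus the directory entries to be
/// serialized.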
struct FactoryFS {
    major_version: u32,
    minor_version: u32,
    flags: u32,
    block_size: u32,
    entries: Vec<DirectoryEntry>,
}

impl FactoryFS {
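    /// Writes the `SUPERBLOCK_DATA_SIZE` bytes of superblock data (unpadded) to `writer`, and
    /// returns the total size of the directory entry metadata, in bytes and in blocks.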
    fn serialize_superblock<Writer>(&self, writer: &mut Writer) -> Result<(u32, u32), Error>
    where
        Writer: Write,
    {
        writer.write_u64::<LittleEndian>(FACTORYFS_MAGIC).context("failed to write magic")?;
        writer.write_u32::<LittleEndian>(self.major_version)?;
        writer.write_u32::<LittleEndian>(self.minor_version)?;
        writer.write_u32::<LittleEndian>(self.flags)?;

        let data_blocks = self
            .entries
            .iter()
            .fold(0, |blocks, entry| blocks + num_blocks(entry.data.len() as u32));
        writer.write_u32::<LittleEndian>(data_blocks)?;

        let entries_bytes =
            self.entries.iter().fold(0, |size, entry| size + entry.metadata_size());
        let entries_blocks = num_blocks(entries_bytes);
        writer.write_u32::<LittleEndian>(entries_bytes)?;
        writer.write_u32::<LittleEndian>(self.entries.len() as u32)?;

        writer.write_u64::<LittleEndian>(0)?;

        writer.write_u32::<LittleEndian>(self.block_size)?;

        writer.write_u32::<LittleEndian>(entries_blocks)?;
        writer.write_u32::<LittleEndian>(DIRENT_START_BLOCK)?;

        Ok((entries_bytes, entries_blocks))
    }

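    /// Serializes the whole image to `writer`: the superblock padded to a full block, then the
    /// run of directory entry metadata padded to a block boundary, then each entry's data padded
    /// to a block boundary.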
    fn serialize<Writer>(&self, writer: &mut Writer) -> Result<(), Error>
    where
        Writer: Write,
    {
        let (entries_bytes, entries_blocks) =
            self.serialize_superblock(writer).context("failed to serialize superblock")?;

        block_align(writer, SUPERBLOCK_DATA_SIZE)?;

        let mut data_offset = DIRENT_START_BLOCK + entries_blocks;
        for entry in &self.entries {
            entry.serialize_metadata(writer, data_offset)?;
            data_offset += num_blocks(entry.data.len() as u32);
        }

        block_align(writer, entries_bytes)?;

        for entry in &self.entries {
            entry.serialize_data(writer)?;
        }

        Ok(())
    }
}

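/// A single file to be placed in the image: its path, relative to the exported directory, and its
/// contents.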
#[derive(Debug, PartialEq, Eq)]
struct DirectoryEntry {
    name: Vec<u8>,
    data: Vec<u8>,
}

impl DirectoryEntry {
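    /// Size in bytes of this entry's serialized metadata: three `u32` fields plus the name,
    /// padded to 4-byte alignment.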
    fn metadata_size(&self) -> u32 {
        let name_len = self.name.len() as u32;
        let padding = round_up_to_align(name_len, 4) - name_len;

        4 + 4 + 4 + name_len + padding
    }

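    /// Writes this entry's metadata to `writer`: the name length, the data length, the block
    /// offset of the data (`data_offset`), and the name itself padded to 4-byte alignment.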
    fn serialize_metadata<Writer>(
        &self,
        writer: &mut Writer,
        data_offset: u32,
    ) -> Result<(), Error>
    where
        Writer: Write,
    {
        let name_len = self.name.len() as u32;
        writer.write_u32::<LittleEndian>(name_len)?;
        writer.write_u32::<LittleEndian>(self.data.len() as u32)?;
        writer.write_u32::<LittleEndian>(data_offset)?;
        writer.write_all(&self.name)?;

        let padding = round_up_to_align(name_len, 4) - name_len;
        for _ in 0..padding {
            writer.write_u8(0)?;
        }

        Ok(())
    }

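    /// Writes this entry's data to `writer`, zero-padded to a block boundary.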
    fn serialize_data<Writer>(&self, writer: &mut Writer) -> Result<(), Error>
    where
        Writer: Write,
    {
        writer.write_all(&self.data)?;
        block_align(writer, self.data.len() as u32)?;
        Ok(())
    }
}

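/// Recursively collects every file under `dir` into a sorted list of [`DirectoryEntry`]s, reading
/// each file's full contents. Returns an error if any entry is not a regular file.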
async fn get_entries(dir: &fio::DirectoryProxy) -> Result<Vec<DirectoryEntry>, Error> {
    let out: Vec<DirEntry> = readdir_recursive(dir, None).map(|x| x.unwrap()).collect().await;

    let mut entries = vec![];
    for ent in out {
        if ent.kind != DirentKind::File {
            bail!(
                "Directory entry '{}' is not a file. FactoryFS can only contain files.",
                ent.name
            )
        }

        let (file_proxy, server_end) = fidl::endpoints::create_proxy::<fio::FileMarker>();
        dir.open(
            &ent.name,
            fio::PERM_READABLE | fio::Flags::PROTOCOL_FILE,
            &Default::default(),
            server_end.into_channel(),
        )
        .with_context(|| format!("failed to open file {}", ent.name))?;
        let (status, attrs) = file_proxy.get_attr().await.with_context(|| {
            format!("failed to get attributes of file {}: (fidl failure)", ent.name)
        })?;
        if zx::Status::from_raw(status) != zx::Status::OK {
            bail!("failed to get attributes of file {}", ent.name);
        }
        let data = file_proxy
            .read(attrs.content_size)
            .await
            .with_context(|| {
                format!("failed to read contents of file {}: (fidl failure)", ent.name)
            })?
            .map_err(zx::Status::from_raw)
            .with_context(|| format!("failed to read contents of file {}", ent.name))?;

        entries.push(DirectoryEntry { name: ent.name.as_bytes().to_vec(), data });
    }

    entries.sort_by(|a, b| a.name.cmp(&b.name));

    Ok(entries)
}

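/// Serializes the contents of `dir` as a FactoryFS image and writes it to `device`.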
async fn write_directory<W: Write>(dir: &fio::DirectoryProxy, device: &mut W) -> Result<(), Error> {
    let entries = get_entries(dir).await.context("failed to get entries from directory")?;

    let factoryfs = FactoryFS {
        major_version: FACTORYFS_MAJOR_VERSION,
        minor_version: FACTORYFS_MINOR_VERSION,
        flags: 0,
        block_size: BLOCK_SIZE,
        entries,
    };

    factoryfs.serialize(device).context("failed to serialize factoryfs")?;

    Ok(())
}

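/// Writes the contents of `dir` out to the block device represented by `client_end` as a
/// FactoryFS filesystem. `dir` (and its subdirectories) may only contain files.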
pub async fn export_directory(
    dir: &fio::DirectoryProxy,
    client_end: fidl::endpoints::ClientEnd<BlockMarker>,
) -> Result<(), Error> {
    let device = RemoteBlockClientSync::new(client_end)
        .context("failed to create remote block device client")?;
    let mut device = Cache::new(device).context("failed to create cache layer for block device")?;

    write_directory(dir, &mut device).await.context("failed to write out directory")?;

    device.flush().context("failed to flush to device")?;

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::{
        block_align, export_directory, get_entries, num_blocks, round_up_to_block_size,
        DirectoryEntry, FactoryFS, BLOCK_SIZE, FACTORYFS_MAJOR_VERSION, FACTORYFS_MINOR_VERSION,
        SUPERBLOCK_DATA_SIZE,
    };

    use assert_matches::assert_matches;
    use fidl_fuchsia_io as fio;
    use ramdevice_client::RamdiskClient;
    use vfs::file::vmo::read_only;
    use vfs::pseudo_directory;

    #[test]
    fn test_num_blocks() {
        assert_eq!(num_blocks(0), 0);
        assert_eq!(num_blocks(1), 1);
        assert_eq!(num_blocks(10), 1);
        assert_eq!(num_blocks(BLOCK_SIZE - 1), 1);
        assert_eq!(num_blocks(BLOCK_SIZE), 1);
        assert_eq!(num_blocks(BLOCK_SIZE + 1), 2);
    }

    #[test]
    fn test_round_up() {
        assert_eq!(round_up_to_block_size(0), 0);
        assert_eq!(round_up_to_block_size(1), BLOCK_SIZE);
        assert_eq!(round_up_to_block_size(BLOCK_SIZE - 1), BLOCK_SIZE);
        assert_eq!(round_up_to_block_size(BLOCK_SIZE), BLOCK_SIZE);
        assert_eq!(round_up_to_block_size(BLOCK_SIZE + 1), BLOCK_SIZE * 2);
    }

    #[test]
    fn test_block_align() {
        let mut cases = vec![
            (0, 0),
            (1, BLOCK_SIZE - 1),
            (BLOCK_SIZE - 1, 1),
            (BLOCK_SIZE, 0),
            (BLOCK_SIZE + 1, BLOCK_SIZE - 1),
        ];

        for case in &mut cases {
            let mut w = vec![];
            assert_matches!(block_align(&mut w, case.0), Ok(()));
            assert_eq!(w.len(), case.1 as usize);
            assert!(w.into_iter().all(|v| v == 0));
        }
    }

    #[test]
    fn test_superblock_data() {
        let name = "test_name".as_bytes();
        let data = vec![1, 2, 3, 4, 5];

        let entry = DirectoryEntry { name: name.to_owned(), data: data.clone() };

        let metadata_size = entry.metadata_size();
        let metadata_blocks = num_blocks(metadata_size);

        let factoryfs = FactoryFS {
            major_version: FACTORYFS_MAJOR_VERSION,
            minor_version: FACTORYFS_MINOR_VERSION,
            flags: 0,
            block_size: BLOCK_SIZE,
            entries: vec![entry],
        };

        let mut out = vec![];
        assert_eq!(
            factoryfs.serialize_superblock(&mut out).unwrap(),
            (metadata_size, metadata_blocks),
        );

        assert_eq!(out.len() as u32, SUPERBLOCK_DATA_SIZE);
    }

    #[test]
    fn test_dirent_metadata() {
        let data_offset = 12;
        let data = vec![1, 2, 3, 4, 5];

        let mut out: Vec<u8> = vec![];
        let name = "test_name".as_bytes();
        let dirent = DirectoryEntry { name: name.to_owned(), data };

        assert_matches!(dirent.serialize_metadata(&mut out, data_offset), Ok(()));
        assert_eq!(dirent.metadata_size(), out.len() as u32);
    }

    #[fuchsia::test]
    async fn test_export() {
        let dir = pseudo_directory! {
            "a" => read_only("a content"),
            "b" => pseudo_directory! {
                "c" => read_only("c content"),
            },
        };
        let dir_proxy = vfs::directory::serve(dir, fio::PERM_READABLE | fio::PERM_WRITABLE);
        let ramdisk = RamdiskClient::create(512, 1 << 16).await.unwrap();
        let channel = ramdisk.open().unwrap();

        assert_matches!(export_directory(&dir_proxy, channel).await, Ok(()));
    }

    #[fuchsia::test]
    async fn test_get_entries() {
        let dir = pseudo_directory! {
            "a" => read_only("a content"),
            "d" => read_only("d content"),
            "b" => pseudo_directory! {
                "c" => read_only("c content"),
            },
        };
        let dir_proxy = vfs::directory::serve(dir, fio::PERM_READABLE | fio::PERM_WRITABLE);
        let entries = get_entries(&dir_proxy).await.unwrap();

        assert_eq!(
            entries,
            vec![
                DirectoryEntry { name: b"a".to_vec(), data: b"a content".to_vec() },
                DirectoryEntry { name: b"b/c".to_vec(), data: b"c content".to_vec() },
                DirectoryEntry { name: b"d".to_vec(), data: b"d content".to_vec() },
            ],
        );
    }
}