1use crate::config::Config;
6use crate::partition::PartitionBackend;
7use crate::partitions_directory::PartitionsDirectory;
8use anyhow::{Context as _, Error, anyhow};
9use block_client::{
10 BlockClient as _, BlockDeviceFlag, BufferSlice, MutableBufferSlice, ReadOptions,
11 RemoteBlockClient, VmoId, WriteOptions,
12};
13use block_server::BlockServer;
14use block_server::async_interface::SessionManager;
15
16use fidl::endpoints::ServerEnd;
17use fs_management::format::constants::{
18 ALL_BENCHMARK_PARTITION_LABELS, ALL_SYSTEM_PARTITION_LABELS,
19};
20use fuchsia_sync::Mutex;
21use futures::stream::TryStreamExt as _;
22use std::collections::BTreeMap;
23use std::num::NonZero;
24use std::sync::atomic::{AtomicBool, Ordering};
25use std::sync::{Arc, Weak};
26use {
27 fidl_fuchsia_storage_block as fblock, fidl_fuchsia_storage_partitions as fpartitions,
28 fuchsia_async as fasync,
29};
30
/// Returns the VFS entry name used to publish the partition at GPT entry
/// `index`, zero-padded to three digits (e.g. `part-007`).
fn partition_directory_entry_name(index: u32) -> String {
    format!("part-{index:03}")
}
34
35fn should_passthrough_partition(info: &gpt::PartitionInfo) -> bool {
42 ALL_SYSTEM_PARTITION_LABELS.contains(&info.label.as_str())
44 || ALL_BENCHMARK_PARTITION_LABELS.contains(&info.label.as_str())
47}
48
/// One partition served out of a GPT, backed by a block-range of the
/// underlying device.
pub struct GptPartition {
    // Weak reference to avoid a cycle: the manager (transitively) owns the
    // partitions.
    gpt: Weak<GptManager>,
    // Current partition metadata; replaced wholesale via `update_info` when a
    // GPT transaction commits.
    info: Mutex<gpt::PartitionInfo>,
    block_client: Arc<RemoteBlockClient>,
}
55
/// Converts an optional trace flow id into the raw `u64` expected by the
/// traced block-client calls; `None` becomes 0.
fn trace_id(trace_flow_id: Option<NonZero<u64>>) -> u64 {
    match trace_flow_id {
        Some(id) => id.get(),
        None => 0,
    }
}
59
60impl GptPartition {
61 pub fn new(
62 gpt: &Arc<GptManager>,
63 block_client: Arc<RemoteBlockClient>,
64 info: gpt::PartitionInfo,
65 ) -> Arc<Self> {
66 Arc::new(Self { gpt: Arc::downgrade(gpt), info: Mutex::new(info), block_client })
67 }
68
69 pub async fn terminate(&self) {
70 if let Err(error) = self.block_client.close().await {
71 log::warn!(error:?; "Failed to close block client");
72 }
73 }
74
75 pub fn update_info(&self, info: gpt::PartitionInfo) -> gpt::PartitionInfo {
77 std::mem::replace(&mut *self.info.lock(), info)
78 }
79
80 pub fn block_size(&self) -> u32 {
81 self.block_client.block_size()
82 }
83
84 pub fn block_count(&self) -> u64 {
85 self.info.lock().num_blocks
86 }
87
88 pub async fn attach_vmo(&self, vmo: &zx::Vmo) -> Result<VmoId, zx::Status> {
89 self.block_client.attach_vmo(vmo).await
90 }
91
92 pub async fn detach_vmo(&self, vmoid: VmoId) -> Result<(), zx::Status> {
93 self.block_client.detach_vmo(vmoid).await
94 }
95
96 pub fn open_passthrough_session(&self, session: ServerEnd<fblock::SessionMarker>) {
97 if let Some(gpt) = self.gpt.upgrade() {
98 let mapping = {
99 let info = self.info.lock();
100 fblock::BlockOffsetMapping {
101 source_block_offset: 0,
102 target_block_offset: info.start_block,
103 length: info.num_blocks,
104 }
105 };
106 if let Err(err) = gpt.block_proxy.open_session_with_offset_map(session, &mapping) {
107 log::warn!(err:?; "Failed to open passthrough session");
110 }
111 } else {
112 if let Err(err) = session.close_with_epitaph(zx::Status::BAD_STATE) {
113 log::warn!(err:?; "Failed to send session epitaph");
114 }
115 }
116 }
117
118 pub fn get_info(&self) -> block_server::DeviceInfo {
119 convert_partition_info(
120 &*self.info.lock(),
121 self.block_client.block_flags(),
122 self.block_client.max_transfer_blocks(),
123 )
124 }
125
126 pub async fn read(
127 &self,
128 device_block_offset: u64,
129 block_count: u32,
130 vmo_id: &VmoId,
131 vmo_offset: u64, opts: ReadOptions,
133 trace_flow_id: Option<NonZero<u64>>,
134 ) -> Result<(), zx::Status> {
135 let dev_offset = self
136 .absolute_offset(device_block_offset, block_count)
137 .map(|offset| offset * self.block_size() as u64)?;
138 let buffer = MutableBufferSlice::new_with_vmo_id(
139 vmo_id,
140 vmo_offset,
141 (block_count * self.block_size()) as u64,
142 );
143 self.block_client
144 .read_at_with_opts_traced(buffer, dev_offset, opts, trace_id(trace_flow_id))
145 .await
146 }
147
148 pub async fn write(
149 &self,
150 device_block_offset: u64,
151 block_count: u32,
152 vmo_id: &VmoId,
153 vmo_offset: u64, opts: WriteOptions,
155 trace_flow_id: Option<NonZero<u64>>,
156 ) -> Result<(), zx::Status> {
157 let dev_offset = self
158 .absolute_offset(device_block_offset, block_count)
159 .map(|offset| offset * self.block_size() as u64)?;
160 let buffer = BufferSlice::new_with_vmo_id(
161 vmo_id,
162 vmo_offset,
163 (block_count * self.block_size()) as u64,
164 );
165 self.block_client
166 .write_at_with_opts_traced(buffer, dev_offset, opts, trace_id(trace_flow_id))
167 .await
168 }
169
170 pub async fn flush(&self, trace_flow_id: Option<NonZero<u64>>) -> Result<(), zx::Status> {
171 self.block_client.flush_traced(trace_id(trace_flow_id)).await
172 }
173
174 pub async fn trim(
175 &self,
176 device_block_offset: u64,
177 block_count: u32,
178 trace_flow_id: Option<NonZero<u64>>,
179 ) -> Result<(), zx::Status> {
180 let dev_offset = self
181 .absolute_offset(device_block_offset, block_count)
182 .map(|offset| offset * self.block_size() as u64)?;
183 let len = block_count as u64 * self.block_size() as u64;
184 self.block_client.trim_traced(dev_offset..dev_offset + len, trace_id(trace_flow_id)).await
185 }
186
187 fn absolute_offset(&self, mut offset: u64, len: u32) -> Result<u64, zx::Status> {
191 let info = self.info.lock();
192 offset = offset.checked_add(info.start_block).ok_or(zx::Status::OUT_OF_RANGE)?;
193 let end = offset.checked_add(len as u64).ok_or(zx::Status::OUT_OF_RANGE)?;
194 if end > info.start_block + info.num_blocks {
195 Err(zx::Status::OUT_OF_RANGE)
196 } else {
197 Ok(offset)
198 }
199 }
200}
201
202fn convert_partition_info(
203 info: &gpt::PartitionInfo,
204 device_flags: BlockDeviceFlag,
205 max_transfer_blocks: Option<NonZero<u32>>,
206) -> block_server::DeviceInfo {
207 block_server::DeviceInfo::Partition(block_server::PartitionInfo {
208 device_flags,
209 max_transfer_blocks,
210 block_range: Some(info.start_block..info.start_block + info.num_blocks),
211 type_guid: info.type_guid.to_bytes(),
212 instance_guid: info.instance_guid.to_bytes(),
213 name: info.label.clone(),
214 flags: info.flags,
215 })
216}
217
218fn can_merge(a: &gpt::PartitionInfo, b: &gpt::PartitionInfo) -> bool {
219 a.start_block + a.num_blocks == b.start_block
220}
221
/// State for an in-flight GPT transaction created by
/// `GptManager::create_transaction`.
struct PendingTransaction {
    transaction: gpt::Transaction,
    // Koid of the client end of the transaction eventpair; later requests must
    // present a handle with a matching koid (see
    // `Inner::ensure_transaction_matches`).
    client_koid: zx::Koid,
    // Indexes of partitions added during this transaction, so they can be
    // bound as new block servers once the transaction commits.
    added_partitions: Vec<u32>,
    // Task that clears the pending transaction if the client end is closed
    // before the transaction commits.
    _signal_task: fasync::Task<()>,
}
231
/// State guarded by `GptManager::inner`'s async mutex.
struct Inner {
    gpt: gpt::Gpt,
    // Block servers for regular partitions, keyed by GPT entry index.
    partitions: BTreeMap<u32, Arc<BlockServer<SessionManager<PartitionBackend>>>>,
    // Block servers for merged (overlay) partitions, e.g. super+userdata,
    // keyed by GPT entry index.
    overlay_partitions: BTreeMap<u32, Arc<BlockServer<SessionManager<PartitionBackend>>>>,
    // VFS directory in which `part-NNN` entries are published.
    partitions_dir: PartitionsDirectory,
    // At most one GPT transaction may be outstanding at a time.
    pending_transaction: Option<PendingTransaction>,
}
242
impl Inner {
    /// Verifies that `transaction` is the client end of the currently pending
    /// transaction by comparing koids.  Returns `BAD_HANDLE` on a koid
    /// mismatch and `BAD_STATE` when no transaction is pending.
    fn ensure_transaction_matches(&self, transaction: &zx::EventPair) -> Result<(), zx::Status> {
        if let Some(pending) = self.pending_transaction.as_ref() {
            if transaction.koid()? == pending.client_koid {
                Ok(())
            } else {
                Err(zx::Status::BAD_HANDLE)
            }
        } else {
            Err(zx::Status::BAD_STATE)
        }
    }

    /// Creates a block server for the partition at `index` and publishes it in
    /// the partitions directory.  A non-empty `overlay_indexes` marks the
    /// partition as an overlay spanning those GPT entry indexes.
    fn bind_partition(
        &mut self,
        parent: &Arc<GptManager>,
        index: u32,
        info: gpt::PartitionInfo,
        overlay_indexes: Vec<usize>,
    ) -> Result<(), Error> {
        let passthrough = should_passthrough_partition(&info);
        log::debug!(
            "GPT part {index}{}{}: {info:?}",
            if !overlay_indexes.is_empty() { " (overlay)" } else { "" },
            if passthrough { " (passthrough)" } else { "" },
        );
        // Reject partitions whose end offset overflows u64 before serving
        // them; `GptPartition::absolute_offset` relies on this invariant.
        info.start_block
            .checked_add(info.num_blocks)
            .ok_or_else(|| anyhow!("Overflow in partition end"))?;
        let partition = PartitionBackend::new(
            GptPartition::new(parent, self.gpt.client().clone(), info),
            passthrough,
        );
        let block_server = Arc::new(BlockServer::new(parent.block_size, partition));
        // Downgraded references are handed to the directory so that dropping
        // the manager/server tears the entries down.
        if !overlay_indexes.is_empty() {
            self.partitions_dir.add_overlay(
                &partition_directory_entry_name(index),
                Arc::downgrade(&block_server),
                Arc::downgrade(parent),
                overlay_indexes,
            );
            self.overlay_partitions.insert(index, block_server);
        } else {
            self.partitions_dir.add_partition(
                &partition_directory_entry_name(index),
                Arc::downgrade(&block_server),
                Arc::downgrade(parent),
                index as usize,
            );
            self.partitions.insert(index, block_server);
        }
        Ok(())
    }

    /// Binds the adjacent `super` and `userdata` partitions as a single merged
    /// overlay partition named "super_and_userdata", published under the
    /// `super` partition's entry index.  Callers must have already verified
    /// adjacency via `can_merge`.
    fn bind_super_and_userdata_partition(
        &mut self,
        parent: &Arc<GptManager>,
        super_partition: (u32, gpt::PartitionInfo),
        userdata_partition: (u32, gpt::PartitionInfo),
    ) -> Result<(), Error> {
        // The merged partition takes super's GUIDs/flags and spans both
        // regions (super's start, combined length).
        let info = gpt::PartitionInfo {
            label: "super_and_userdata".to_string(),
            type_guid: super_partition.1.type_guid.clone(),
            instance_guid: super_partition.1.instance_guid.clone(),
            start_block: super_partition.1.start_block,
            num_blocks: super_partition.1.num_blocks + userdata_partition.1.num_blocks,
            flags: super_partition.1.flags,
        };
        log::trace!(
            "GPT merged parts {:?} + {:?} -> {info:?}",
            super_partition.1,
            userdata_partition.1
        );
        self.bind_partition(
            parent,
            super_partition.0,
            info,
            vec![super_partition.0 as usize, userdata_partition.0 as usize],
        )
    }

    /// Clears any previously bound partitions and binds one block server per
    /// GPT entry, optionally merging `super` and `userdata` into one overlay
    /// partition when the config requests it and the two are adjacent.
    fn bind_all_partitions(&mut self, parent: &Arc<GptManager>) -> Result<(), Error> {
        self.partitions.clear();
        self.overlay_partitions.clear();
        self.partitions_dir.clear();

        let mut partitions = self.gpt.partitions().clone();
        if parent.config.merge_super_and_userdata {
            // Pull super/userdata out of the map (if present) so the generic
            // loop below doesn't bind them a second time.
            let super_part = match partitions
                .iter()
                .find(|(_, info)| info.label == "super")
                .map(|(index, _)| *index)
            {
                Some(index) => partitions.remove_entry(&index),
                None => None,
            };
            let userdata_part = match partitions
                .iter()
                .find(|(_, info)| info.label == "userdata")
                .map(|(index, _)| *index)
            {
                Some(index) => partitions.remove_entry(&index),
                None => None,
            };
            if super_part.is_some() && userdata_part.is_some() {
                let super_part = super_part.unwrap();
                let userdata_part = userdata_part.unwrap();
                // Merge only works when userdata immediately follows super;
                // otherwise fall back to binding them individually.
                if can_merge(&super_part.1, &userdata_part.1) {
                    self.bind_super_and_userdata_partition(parent, super_part, userdata_part)?;
                } else {
                    log::warn!("super/userdata cannot be merged");
                    self.bind_partition(parent, super_part.0, super_part.1, vec![])?;
                    self.bind_partition(parent, userdata_part.0, userdata_part.1, vec![])?;
                }
            } else if super_part.is_some() || userdata_part.is_some() {
                log::warn!("Only one of super/userdata found; not merging");
                let (index, info) = super_part.or(userdata_part).unwrap();
                self.bind_partition(parent, index, info, vec![])?;
            }
        }
        for (index, info) in partitions {
            self.bind_partition(parent, index, info, vec![])?;
        }
        Ok(())
    }

    /// Adds a partition to the pending transaction, recording its index so it
    /// can be bound after commit.  Panics if no transaction is pending —
    /// callers must check via `ensure_transaction_matches` first.
    fn add_partition(&mut self, info: gpt::PartitionInfo) -> Result<usize, gpt::AddPartitionError> {
        let pending = self.pending_transaction.as_mut().unwrap();
        let idx = self.gpt.add_partition(&mut pending.transaction, info)?;
        pending.added_partitions.push(idx as u32);
        Ok(idx)
    }
}
380
/// Serves a GPT-formatted block device, exposing each partition as its own
/// block server published into a partitions directory.
pub struct GptManager {
    config: Config,
    // Proxy to the whole underlying device; also used to open passthrough
    // sessions with an offset map (see `GptPartition::open_passthrough_session`).
    block_proxy: fblock::BlockProxy,
    block_size: u32,
    block_count: u64,
    // Async mutex because the lock is held across awaits (e.g. while
    // committing a GPT transaction).
    inner: futures::lock::Mutex<Inner>,
    // Set by `shutdown`; `Drop` asserts it to enforce orderly teardown.
    shutdown: AtomicBool,
}
390
391impl std::fmt::Debug for GptManager {
392 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
393 f.debug_struct("GptManager")
394 .field("block_size", &self.block_size)
395 .field("block_count", &self.block_count)
396 .finish()
397 }
398}
399
impl GptManager {
    /// Opens the GPT on `block_proxy` with default configuration and publishes
    /// its partitions under `partitions_dir`.
    pub async fn new(
        block_proxy: fblock::BlockProxy,
        partitions_dir: Arc<vfs::directory::immutable::Simple>,
    ) -> Result<Arc<Self>, Error> {
        Self::new_with_config(block_proxy, partitions_dir, Config::default()).await
    }

    /// Opens the GPT on `block_proxy` and publishes its partitions under
    /// `partitions_dir`.  Fails if the device does not contain a valid GPT.
    pub async fn new_with_config(
        block_proxy: fblock::BlockProxy,
        partitions_dir: Arc<vfs::directory::immutable::Simple>,
        config: Config,
    ) -> Result<Arc<Self>, Error> {
        log::info!("Binding to GPT");
        let client = Arc::new(RemoteBlockClient::new(block_proxy.clone()).await?);
        let block_size = client.block_size();
        let block_count = client.block_count();
        let gpt = gpt::Gpt::open(client).await.context("Failed to load GPT")?;

        let this = Arc::new(Self {
            config,
            block_proxy,
            block_size,
            block_count,
            inner: futures::lock::Mutex::new(Inner {
                gpt,
                partitions: BTreeMap::new(),
                overlay_partitions: BTreeMap::new(),
                partitions_dir: PartitionsDirectory::new(partitions_dir),
                pending_transaction: None,
            }),
            shutdown: AtomicBool::new(false),
        });
        // Binding happens after construction because the partitions hold weak
        // references back to the manager.
        this.inner.lock().await.bind_all_partitions(&this)?;
        log::info!("Starting all partitions OK!");
        Ok(this)
    }

    pub fn block_size(&self) -> u32 {
        self.block_size
    }

    pub fn block_count(&self) -> u64 {
        self.block_count
    }

    /// Starts a new GPT transaction, returning the client end of an eventpair
    /// that identifies it.  Only one transaction may be outstanding at a time
    /// (`ALREADY_EXISTS` otherwise); dropping the client end aborts the
    /// transaction.
    pub async fn create_transaction(self: &Arc<Self>) -> Result<zx::EventPair, zx::Status> {
        let mut inner = self.inner.lock().await;
        if inner.pending_transaction.is_some() {
            return Err(zx::Status::ALREADY_EXISTS);
        }
        let transaction = inner.gpt.create_transaction().unwrap();
        let (client_end, server_end) = zx::EventPair::create();
        let client_koid = client_end.koid()?;
        // Watch for the client closing its end so an abandoned transaction is
        // cleaned up rather than blocking future transactions forever.
        let signal_waiter = fasync::OnSignals::new(server_end, zx::Signals::EVENTPAIR_PEER_CLOSED);
        let this = self.clone();
        let task = fasync::Task::spawn(async move {
            let _ = signal_waiter.await;
            let mut inner = this.inner.lock().await;
            // Only clear if it's still *this* transaction (a newer one may
            // have been created after a commit consumed this one).
            if inner.pending_transaction.as_ref().map_or(false, |t| t.client_koid == client_koid) {
                inner.pending_transaction = None;
            }
        });
        inner.pending_transaction = Some(PendingTransaction {
            transaction,
            client_koid,
            added_partitions: vec![],
            _signal_task: task,
        });
        Ok(client_end)
    }

    /// Commits the pending transaction identified by `transaction`, updating
    /// metadata of existing partitions and binding any newly added ones.
    pub async fn commit_transaction(
        self: &Arc<Self>,
        transaction: zx::EventPair,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(&transaction)?;
        // Safe to unwrap: ensure_transaction_matches verified one is pending.
        let pending = std::mem::take(&mut inner.pending_transaction).unwrap();
        let partitions = pending.transaction.partitions.clone();
        if let Err(err) = inner.gpt.commit_transaction(pending.transaction).await {
            log::warn!(err:?; "Failed to commit transaction");
            return Err(zx::Status::IO);
        }
        // Push updated metadata into already-bound partitions (skipping
        // entries that were added by this transaction — they get bound fresh
        // below).
        for (info, idx) in partitions
            .iter()
            .zip(0u32..)
            .filter(|(info, idx)| !info.is_nil() && !pending.added_partitions.contains(idx))
        {
            if let Some(part) = inner.partitions.get(&idx) {
                part.session_manager().interface().update_info(info.clone());
            }
        }
        // Bind block servers for partitions created during this transaction.
        for idx in pending.added_partitions {
            if let Some(info) = inner.gpt.partitions().get(&idx).cloned() {
                if let Err(err) = inner.bind_partition(self, idx, info, vec![]) {
                    log::error!(err:?; "Failed to bind partition");
                }
            }
        }
        Ok(())
    }

    /// Stages a new partition in the pending transaction.  The allocated
    /// start block is chosen by the GPT library (`start_block` in `request`
    /// is ignored); the partition appears after `commit_transaction`.
    pub async fn add_partition(
        &self,
        request: fpartitions::PartitionsManagerAddPartitionRequest,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(
            request.transaction.as_ref().ok_or(zx::Status::BAD_HANDLE)?,
        )?;
        let info = gpt::PartitionInfo {
            label: request.name.ok_or(zx::Status::INVALID_ARGS)?,
            type_guid: request
                .type_guid
                .map(|value| gpt::Guid::from_bytes(value.value))
                .ok_or(zx::Status::INVALID_ARGS)?,
            // Instance GUID is optional; generate one if absent.
            instance_guid: request
                .instance_guid
                .map(|value| gpt::Guid::from_bytes(value.value))
                .unwrap_or_else(|| gpt::Guid::generate()),
            start_block: 0,
            num_blocks: request.num_blocks.ok_or(zx::Status::INVALID_ARGS)?,
            flags: request.flags.unwrap_or_default(),
        };
        let idx = inner.add_partition(info)?;
        let partition =
            inner.pending_transaction.as_ref().unwrap().transaction.partitions.get(idx).unwrap();
        log::info!(
            "Allocated partition {:?} at {:?}",
            partition.label,
            partition.start_block..partition.start_block + partition.num_blocks
        );
        Ok(())
    }

    /// Serves `fuchsia.storage.partitions.Partition` requests for the
    /// partition at `gpt_index` until the stream closes.
    pub async fn handle_partitions_requests(
        &self,
        gpt_index: usize,
        mut requests: fpartitions::PartitionRequestStream,
    ) -> Result<(), zx::Status> {
        while let Some(request) = requests.try_next().await.unwrap() {
            match request {
                fpartitions::PartitionRequest::UpdateMetadata { payload, responder } => {
                    responder
                        .send(
                            self.update_partition_metadata(gpt_index, payload)
                                .await
                                .map_err(|status| status.into_raw()),
                        )
                        .unwrap_or_else(
                            |err| log::error!(err:?; "Failed to send UpdateMetadata response"),
                        );
                }
            }
        }
        Ok(())
    }

    /// Applies a metadata update (type GUID and/or flags) to the entry at
    /// `gpt_index` in the pending transaction.  Takes effect on commit.
    async fn update_partition_metadata(
        &self,
        gpt_index: usize,
        request: fpartitions::PartitionUpdateMetadataRequest,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(
            request.transaction.as_ref().ok_or(zx::Status::BAD_HANDLE)?,
        )?;

        let transaction = &mut inner.pending_transaction.as_mut().unwrap().transaction;
        let entry = transaction.partitions.get_mut(gpt_index).ok_or(zx::Status::BAD_STATE)?;
        if let Some(type_guid) = request.type_guid.as_ref().cloned() {
            entry.type_guid = gpt::Guid::from_bytes(type_guid.value);
        }
        if let Some(flags) = request.flags.as_ref() {
            entry.flags = *flags;
        }
        Ok(())
    }

    /// Serves `fuchsia.storage.partitions.OverlayPartition` requests for the
    /// overlay spanning `gpt_indexes` until the stream closes.
    pub async fn handle_overlay_partitions_requests(
        &self,
        gpt_indexes: Vec<usize>,
        mut requests: fpartitions::OverlayPartitionRequestStream,
    ) -> Result<(), zx::Status> {
        while let Some(request) = requests.try_next().await.unwrap() {
            match request {
                fpartitions::OverlayPartitionRequest::GetPartitions { responder } => {
                    match self.get_overlay_partition_info(&gpt_indexes[..]).await {
                        Ok(partitions) => responder.send(Ok(&partitions[..])),
                        Err(status) => responder.send(Err(status.into_raw())),
                    }
                    .unwrap_or_else(
                        |err| log::error!(err:?; "Failed to send GetPartitions response"),
                    );
                }
            }
        }
        Ok(())
    }

    /// Looks up the current GPT metadata for each of `gpt_indexes`, converted
    /// to the FIDL representation.  Returns `BAD_STATE` if any index has no
    /// corresponding live partition.
    async fn get_overlay_partition_info(
        &self,
        gpt_indexes: &[usize],
    ) -> Result<Vec<fpartitions::PartitionInfo>, zx::Status> {
        // Local helper: gpt -> FIDL representation of partition metadata.
        fn convert_partition_info(info: &gpt::PartitionInfo) -> fpartitions::PartitionInfo {
            fpartitions::PartitionInfo {
                name: info.label.to_string(),
                type_guid: fblock::Guid { value: info.type_guid.to_bytes() },
                instance_guid: fblock::Guid { value: info.instance_guid.to_bytes() },
                start_block: info.start_block,
                num_blocks: info.num_blocks,
                flags: info.flags,
            }
        }

        let inner = self.inner.lock().await;
        let mut partitions = vec![];
        for index in gpt_indexes {
            let index: u32 = *index as u32;
            partitions.push(
                inner
                    .gpt
                    .partitions()
                    .get(&index)
                    .map(convert_partition_info)
                    .ok_or(zx::Status::BAD_STATE)?,
            );
        }
        Ok(partitions)
    }

    /// Replaces the entire partition table with `partitions` and rebinds all
    /// block servers.  Destroys existing data; fails with `BAD_STATE` if a
    /// transaction is in flight.
    pub async fn reset_partition_table(
        self: &Arc<Self>,
        partitions: Vec<gpt::PartitionInfo>,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        if inner.pending_transaction.is_some() {
            return Err(zx::Status::BAD_STATE);
        }

        log::info!("Resetting gpt. Expect data loss!!!");
        let mut transaction = inner.gpt.create_transaction().unwrap();
        transaction.partitions = partitions;
        inner.gpt.commit_transaction(transaction).await?;

        if let Err(err) = inner.bind_all_partitions(&self) {
            log::error!(err:?; "Failed to rebind partitions");
            return Err(zx::Status::BAD_STATE);
        }
        log::info!("Rebinding partitions OK!");
        Ok(())
    }

    /// Tears down all partitions and marks the manager as shut down.  Must be
    /// called before dropping the manager (enforced by `Drop`).
    pub async fn shutdown(self: Arc<Self>) {
        log::info!("Shutting down gpt");
        let mut inner = self.inner.lock().await;
        inner.partitions_dir.clear();
        inner.partitions.clear();
        inner.overlay_partitions.clear();
        self.shutdown.store(true, Ordering::Relaxed);
        log::info!("Shutting down gpt OK");
    }
}
671
672impl Drop for GptManager {
673 fn drop(&mut self) {
674 assert!(self.shutdown.load(Ordering::Relaxed), "Did you forget to shutdown?");
675 }
676}
677
678#[cfg(test)]
679mod tests {
680 use super::GptManager;
681 use block_client::{
682 BlockClient as _, BlockDeviceFlag, BufferSlice, MutableBufferSlice, RemoteBlockClient,
683 WriteFlags,
684 };
685 use block_server::{BlockInfo, DeviceInfo, WriteOptions};
686 use fidl::HandleBased as _;
687 use fs_management::format::constants::FVM_PARTITION_LABEL;
688 use fuchsia_component::client::connect_to_named_protocol_at_dir_root;
689 use gpt::{Gpt, Guid, PartitionInfo};
690 use std::num::NonZero;
691 use std::sync::Arc;
692 use std::sync::atomic::{AtomicBool, Ordering};
693 use vmo_backed_block_server::{
694 InitialContents, VmoBackedServer, VmoBackedServerOptions, VmoBackedServerTestingExt as _,
695 };
696 use {
697 fidl_fuchsia_io as fio, fidl_fuchsia_storage_block as fblock,
698 fidl_fuchsia_storage_partitions as fpartitions, fuchsia_async as fasync,
699 };
700
    /// Creates a VMO-backed block server of `block_count` blocks formatted
    /// with a GPT containing `partitions`, plus an empty VFS directory for the
    /// manager under test to publish into.
    async fn setup(
        block_size: u32,
        block_count: u64,
        partitions: Vec<PartitionInfo>,
    ) -> (Arc<VmoBackedServer>, Arc<vfs::directory::immutable::Simple>) {
        setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(block_count),
                block_size,
                ..Default::default()
            },
            partitions,
        )
        .await
    }
716
    /// Like `setup` but with full control over the server options.  Formats
    /// the device with a GPT containing `partitions` over a temporary
    /// connection before returning.
    async fn setup_with_options(
        opts: VmoBackedServerOptions<'_>,
        partitions: Vec<PartitionInfo>,
    ) -> (Arc<VmoBackedServer>, Arc<vfs::directory::immutable::Simple>) {
        let server = Arc::new(opts.build().unwrap());
        {
            let (block_client, block_server) =
                fidl::endpoints::create_proxy::<fblock::BlockMarker>();
            let volume_stream = fidl::endpoints::ServerEnd::<fblock::BlockMarker>::from(
                block_server.into_channel(),
            )
            .into_stream();
            let server_clone = server.clone();
            let _task = fasync::Task::spawn(async move { server_clone.serve(volume_stream).await });
            let client = Arc::new(RemoteBlockClient::new(block_client).await.unwrap());
            Gpt::format(client, partitions).await.unwrap();
        }
        (server, vfs::directory::immutable::simple())
    }
736
    // Loading from a device with no GPT should fail.
    #[fuchsia::test]
    async fn load_unformatted_gpt() {
        let vmo = zx::Vmo::create(4096).unwrap();
        let server = Arc::new(VmoBackedServer::from_vmo(512, vmo));

        GptManager::new(server.connect(), vfs::directory::immutable::simple())
            .await
            .expect_err("load should fail");
    }
746
    // A valid GPT with zero partitions should load successfully.
    #[fuchsia::test]
    async fn load_formatted_empty_gpt() {
        let (block_device, partitions_dir) = setup(512, 8, vec![]).await;

        let runner = GptManager::new(block_device.connect(), partitions_dir)
            .await
            .expect("load should succeed");
        runner.shutdown().await;
    }
756
    // A single-partition GPT should publish exactly one "part-000" entry.
    #[fuchsia::test]
    async fn load_formatted_gpt_with_one_partition() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").map(|_| ()).expect_err("Extra entry found");
        runner.shutdown().await;
    }
785
    // A two-partition GPT should publish "part-000" and "part-001" and nothing
    // more.
    #[fuchsia::test]
    async fn load_formatted_gpt_with_two_partitions() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        partitions_dir.get_entry("part-002").map(|_| ()).expect_err("Extra entry found");
        runner.shutdown().await;
    }
827
    // End-to-end I/O through a served partition: reads/writes/trims are
    // range-checked against the partition bounds and land at the correct
    // absolute device offset.
    #[fuchsia::test]
    async fn partition_io() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 2,
                flags: 0,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block = connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&proxy, "volume")
            .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        assert_eq!(client.block_count(), 2);
        assert_eq!(client.block_size(), 512);

        let buf = vec![0xabu8; 512];
        client.write_at(BufferSlice::Memory(&buf[..]), 0).await.expect("write_at failed");
        client
            .write_at(BufferSlice::Memory(&buf[..]), 1024)
            .await
            .expect_err("write_at should fail when writing past partition end");
        let mut buf2 = vec![0u8; 512];
        client.read_at(MutableBufferSlice::Memory(&mut buf2[..]), 0).await.expect("read_at failed");
        assert_eq!(buf, buf2);
        client
            .read_at(MutableBufferSlice::Memory(&mut buf2[..]), 1024)
            .await
            .expect_err("read_at should fail when reading past partition end");
        client.trim(512..1024).await.expect("trim failed");
        client.trim(1..512).await.expect_err("trim with invalid range should fail");
        client.trim(1024..1536).await.expect_err("trim past end of partition should fail");
        runner.shutdown().await;

        // Verify the write landed at the right device offset: partition block
        // 0 is device block 4 (byte offset 2048).
        let mut buf = vec![0u8; 512];
        let client =
            RemoteBlockClient::new(block_device.connect::<fblock::BlockProxy>()).await.unwrap();
        client.read_at(MutableBufferSlice::Memory(&mut buf[..]), 2048).await.unwrap();
        assert_eq!(&buf[..], &[0xabu8; 512]);
    }
890
    // A corrupted primary GPT header (block 1) should still load via the
    // backup copy.
    #[fuchsia::test]
    async fn load_formatted_gpt_with_invalid_primary_header() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        {
            // Clobber the primary header at byte offset 512 (block 1).
            let (client, stream) =
                fidl::endpoints::create_proxy_and_stream::<fblock::BlockMarker>();
            let server = block_device.clone();
            let _task = fasync::Task::spawn(async move { server.serve(stream).await });
            let client = RemoteBlockClient::new(client).await.unwrap();
            client.write_at(BufferSlice::Memory(&[0xffu8; 512]), 512).await.unwrap();
        }

        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        runner.shutdown().await;
    }
938
    // A corrupted primary partition table (block 2) should still load via the
    // backup copy.
    #[fuchsia::test]
    async fn load_formatted_gpt_with_invalid_primary_partition_table() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        {
            // Clobber the primary partition table at byte offset 1024 (block 2).
            let (client, stream) =
                fidl::endpoints::create_proxy_and_stream::<fblock::BlockMarker>();
            let server = block_device.clone();
            let _task = fasync::Task::spawn(async move { server.serve(stream).await });
            let client = RemoteBlockClient::new(client).await.unwrap();
            client.write_at(BufferSlice::Memory(&[0xffu8; 512]), 1024).await.unwrap();
        }

        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        runner.shutdown().await;
    }
986
    // The FORCE_ACCESS write flag set by a partition client must reach the
    // underlying device unchanged (and must not appear when the client didn't
    // set it).
    #[fuchsia::test]
    async fn force_access_passed_through() {
        const BLOCK_SIZE: u32 = 512;
        const BLOCK_COUNT: u64 = 1024;

        // Observer asserting that each write's FORCE_ACCESS flag matches the
        // shared expectation flag.
        struct Observer(Arc<AtomicBool>);

        impl vmo_backed_block_server::Observer for Observer {
            fn write(
                &self,
                _device_block_offset: u64,
                _block_count: u32,
                _vmo: &Arc<zx::Vmo>,
                _vmo_offset: u64,
                opts: WriteOptions,
            ) -> vmo_backed_block_server::WriteAction {
                assert_eq!(
                    opts.flags.contains(WriteFlags::FORCE_ACCESS),
                    self.0.load(Ordering::Relaxed)
                );
                vmo_backed_block_server::WriteAction::Write
            }
        }

        let expect_force_access = Arc::new(AtomicBool::new(false));
        let (server, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(BLOCK_COUNT),
                block_size: BLOCK_SIZE,
                observer: Some(Box::new(Observer(expect_force_access.clone()))),
                info: DeviceInfo::Block(BlockInfo {
                    device_flags: fblock::DeviceFlag::FUA_SUPPORT,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: "foo".to_string(),
                type_guid: Guid::from_bytes([1; 16]),
                instance_guid: Guid::from_bytes([2; 16]),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let manager = GptManager::new(server.connect(), partitions_dir.clone()).await.unwrap();

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block = connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&proxy, "volume")
            .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        // Plain write: the observer should see no FORCE_ACCESS.
        let buffer = vec![0; BLOCK_SIZE as usize];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();

        expect_force_access.store(true, Ordering::Relaxed);

        // FORCE_ACCESS write: the flag must propagate to the device.
        client
            .write_at_with_opts(
                BufferSlice::Memory(&buffer),
                0,
                WriteOptions { flags: WriteFlags::FORCE_ACCESS, ..Default::default() },
            )
            .await
            .unwrap();

        manager.shutdown().await;
    }
1061
    #[fuchsia::test]
    async fn barrier_passed_through() {
        const BLOCK_SIZE: u32 = 512;
        const BLOCK_COUNT: u64 = 1024;

        // Observer that asserts whether each write arriving at the backing block
        // server carries the PRE_BARRIER flag, matching the shared expectation flag.
        struct Observer(Arc<AtomicBool>);

        impl vmo_backed_block_server::Observer for Observer {
            fn write(
                &self,
                _device_block_offset: u64,
                _block_count: u32,
                _vmo: &Arc<zx::Vmo>,
                _vmo_offset: u64,
                opts: WriteOptions,
            ) -> vmo_backed_block_server::WriteAction {
                assert_eq!(
                    opts.flags.contains(WriteFlags::PRE_BARRIER),
                    self.0.load(Ordering::Relaxed)
                );
                vmo_backed_block_server::WriteAction::Write
            }
        }

        let expect_barrier = Arc::new(AtomicBool::new(false));
        let (server, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(BLOCK_COUNT),
                block_size: BLOCK_SIZE,
                observer: Some(Box::new(Observer(expect_barrier.clone()))),
                info: DeviceInfo::Block(BlockInfo {
                    // The device must advertise barrier support for PRE_BARRIER to
                    // be forwarded rather than emulated/stripped.
                    device_flags: fblock::DeviceFlag::BARRIER_SUPPORT,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: "foo".to_string(),
                type_guid: Guid::from_bytes([1; 16]),
                instance_guid: Guid::from_bytes([2; 16]),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let manager = GptManager::new(server.connect(), partitions_dir.clone()).await.unwrap();

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block = connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&proxy, "volume")
            .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        // First write: no barrier expected.
        let buffer = vec![0; BLOCK_SIZE as usize];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();

        expect_barrier.store(true, Ordering::Relaxed);
        // NOTE(review): barrier() is called without .await — presumably it is a
        // synchronous call that tags the *next* write with PRE_BARRIER; confirm
        // against the block_client API.
        client.barrier();
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();

        manager.shutdown().await;
    }
1129
    #[fuchsia::test]
    async fn commit_transaction() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_NAME: &str = "part";
        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
        const PART_2_NAME: &str = "part2";

        // Two single-block partitions on a small 16-block device.
        let (block_device, partitions_dir) = setup(
            512,
            16,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");

        let part_0_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_1_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::Path::validate_and_split("part-001").unwrap(),
            fio::PERM_READABLE,
        );
        let part_0_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_0_dir,
            "partition",
        )
        .expect("Failed to open Partition service");
        let part_1_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_1_dir,
            "partition",
        )
        .expect("Failed to open Partition service");

        // Stage metadata updates to both partitions under a single transaction:
        // part-000 gets a new type GUID, part-001 gets new flags.  Each call passes
        // a duplicate of the transaction handle so both updates land in the same
        // transaction, which is then committed once.
        let transaction = runner.create_transaction().await.expect("Failed to create transaction");
        part_0_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                type_guid: Some(fblock::Guid { value: [0xffu8; 16] }),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");
        part_1_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                flags: Some(1234),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");
        runner.commit_transaction(transaction).await.expect("Failed to commit transaction");

        // After commit, both staged updates should be observable on the exported
        // block services.
        let part_0_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_0_dir, "volume")
                .expect("Failed to open Volume service");
        let (status, guid) = part_0_block.get_type_guid().await.expect("FIDL error");
        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
        assert_eq!(guid.unwrap().value, [0xffu8; 16]);
        let part_1_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_1_dir, "volume")
                .expect("Failed to open Volume service");
        let metadata =
            part_1_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
        // part-001's type GUID was untouched; only its flags changed.
        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
        assert_eq!(metadata.flags, Some(1234));

        runner.shutdown().await;
    }
1224
1225 #[fuchsia::test]
1226 async fn commit_transaction_with_io_error() {
1227 const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
1228 const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
1229 const PART_1_NAME: &str = "part";
1230 const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
1231 const PART_2_NAME: &str = "part2";
1232
1233 #[derive(Clone)]
1234 struct Observer(Arc<AtomicBool>);
1235 impl vmo_backed_block_server::Observer for Observer {
1236 fn write(
1237 &self,
1238 _device_block_offset: u64,
1239 _block_count: u32,
1240 _vmo: &Arc<zx::Vmo>,
1241 _vmo_offset: u64,
1242 _opts: WriteOptions,
1243 ) -> vmo_backed_block_server::WriteAction {
1244 if self.0.load(Ordering::Relaxed) {
1245 vmo_backed_block_server::WriteAction::Fail
1246 } else {
1247 vmo_backed_block_server::WriteAction::Write
1248 }
1249 }
1250 }
1251 let observer = Observer(Arc::new(AtomicBool::new(false)));
1252 let (block_device, partitions_dir) = setup_with_options(
1253 VmoBackedServerOptions {
1254 initial_contents: InitialContents::FromCapacity(16),
1255 block_size: 512,
1256 observer: Some(Box::new(observer.clone())),
1257 ..Default::default()
1258 },
1259 vec![
1260 PartitionInfo {
1261 label: PART_1_NAME.to_string(),
1262 type_guid: Guid::from_bytes(PART_TYPE_GUID),
1263 instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
1264 start_block: 4,
1265 num_blocks: 1,
1266 flags: 0,
1267 },
1268 PartitionInfo {
1269 label: PART_2_NAME.to_string(),
1270 type_guid: Guid::from_bytes(PART_TYPE_GUID),
1271 instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
1272 start_block: 5,
1273 num_blocks: 1,
1274 flags: 0,
1275 },
1276 ],
1277 )
1278 .await;
1279 let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
1280 .await
1281 .expect("load should succeed");
1282
1283 let part_0_dir = vfs::serve_directory(
1284 partitions_dir.clone(),
1285 vfs::Path::validate_and_split("part-000").unwrap(),
1286 fio::PERM_READABLE,
1287 );
1288 let part_1_dir = vfs::serve_directory(
1289 partitions_dir.clone(),
1290 vfs::Path::validate_and_split("part-001").unwrap(),
1291 fio::PERM_READABLE,
1292 );
1293 let part_0_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
1294 &part_0_dir,
1295 "partition",
1296 )
1297 .expect("Failed to open Partition service");
1298 let part_1_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
1299 &part_1_dir,
1300 "partition",
1301 )
1302 .expect("Failed to open Partition service");
1303
1304 let transaction = runner.create_transaction().await.expect("Failed to create transaction");
1305 part_0_proxy
1306 .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
1307 transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
1308 type_guid: Some(fblock::Guid { value: [0xffu8; 16] }),
1309 ..Default::default()
1310 })
1311 .await
1312 .expect("FIDL error")
1313 .expect("Failed to update_metadata");
1314 part_1_proxy
1315 .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
1316 transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
1317 flags: Some(1234),
1318 ..Default::default()
1319 })
1320 .await
1321 .expect("FIDL error")
1322 .expect("Failed to update_metadata");
1323
1324 observer.0.store(true, Ordering::Relaxed); runner.commit_transaction(transaction).await.expect_err("Commit transaction should fail");
1326
1327 let part_0_block =
1329 connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_0_dir, "volume")
1330 .expect("Failed to open Volume service");
1331 let (status, guid) = part_0_block.get_type_guid().await.expect("FIDL error");
1332 assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
1333 assert_eq!(guid.unwrap().value, [2u8; 16]);
1334 let part_1_block =
1335 connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_1_dir, "volume")
1336 .expect("Failed to open Volume service");
1337 let metadata =
1338 part_1_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
1339 assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
1340 assert_eq!(metadata.flags, Some(0));
1341
1342 runner.shutdown().await;
1343 }
1344
    #[fuchsia::test]
    async fn reset_partition_tables() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_NAME: &str = "part";
        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
        const PART_2_NAME: &str = "part2";
        const PART_3_NAME: &str = "part3";
        const PART_4_NAME: &str = "part4";

        // Start with two partitions; they will be wholly replaced by the reset.
        let (block_device, partitions_dir) = setup(
            512,
            1048576 / 512,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        // Build a full 128-entry table that is empty except for slots 0 and 2;
        // slot 1 is deliberately left nil so the corresponding directory entry
        // should disappear.
        let nil_entry = PartitionInfo {
            label: "".to_string(),
            type_guid: Guid::from_bytes([0u8; 16]),
            instance_guid: Guid::from_bytes([0u8; 16]),
            start_block: 0,
            num_blocks: 0,
            flags: 0,
        };
        let mut new_partitions = vec![nil_entry; 128];
        new_partitions[0] = PartitionInfo {
            label: PART_3_NAME.to_string(),
            type_guid: Guid::from_bytes(PART_TYPE_GUID),
            instance_guid: Guid::from_bytes([1u8; 16]),
            start_block: 64,
            num_blocks: 2,
            flags: 0,
        };
        new_partitions[2] = PartitionInfo {
            label: PART_4_NAME.to_string(),
            type_guid: Guid::from_bytes(PART_TYPE_GUID),
            instance_guid: Guid::from_bytes([2u8; 16]),
            start_block: 66,
            num_blocks: 4,
            flags: 0,
        };
        runner.reset_partition_table(new_partitions).await.expect("reset_partition_table failed");
        // Directory entries mirror the occupied table slots: 0 and 2 exist, 1 does not.
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").map(|_| ()).expect_err("Extra entry found");
        partitions_dir.get_entry("part-002").expect("No entry found");

        // Spot-check that slot 0 now serves the new partition's name.
        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block = connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&proxy, "volume")
            .expect("Failed to open block service");
        let (status, name) = block.get_name().await.expect("FIDL error");
        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
        assert_eq!(name.unwrap(), PART_3_NAME);

        runner.shutdown().await;
    }
1426
1427 #[fuchsia::test]
1428 async fn reset_partition_tables_fails_if_too_many_partitions() {
1429 let (block_device, partitions_dir) = setup(512, 8, vec![]).await;
1430 let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
1431 .await
1432 .expect("load should succeed");
1433 let nil_entry = PartitionInfo {
1434 label: "".to_string(),
1435 type_guid: Guid::from_bytes([0u8; 16]),
1436 instance_guid: Guid::from_bytes([0u8; 16]),
1437 start_block: 0,
1438 num_blocks: 0,
1439 flags: 0,
1440 };
1441 let new_partitions = vec![nil_entry; 128];
1442 runner
1443 .reset_partition_table(new_partitions)
1444 .await
1445 .expect_err("reset_partition_table should fail");
1446
1447 runner.shutdown().await;
1448 }
1449
1450 #[fuchsia::test]
1451 async fn reset_partition_tables_fails_if_too_large_partitions() {
1452 let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
1453 let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
1454 .await
1455 .expect("load should succeed");
1456 let new_partitions = vec![
1457 PartitionInfo {
1458 label: "a".to_string(),
1459 type_guid: Guid::from_bytes([1u8; 16]),
1460 instance_guid: Guid::from_bytes([1u8; 16]),
1461 start_block: 4,
1462 num_blocks: 2,
1463 flags: 0,
1464 },
1465 PartitionInfo {
1466 label: "b".to_string(),
1467 type_guid: Guid::from_bytes([2u8; 16]),
1468 instance_guid: Guid::from_bytes([2u8; 16]),
1469 start_block: 6,
1470 num_blocks: 200,
1471 flags: 0,
1472 },
1473 ];
1474 runner
1475 .reset_partition_table(new_partitions)
1476 .await
1477 .expect_err("reset_partition_table should fail");
1478
1479 runner.shutdown().await;
1480 }
1481
1482 #[fuchsia::test]
1483 async fn reset_partition_tables_fails_if_partition_overlaps_metadata() {
1484 let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
1485 let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
1486 .await
1487 .expect("load should succeed");
1488 let new_partitions = vec![PartitionInfo {
1489 label: "a".to_string(),
1490 type_guid: Guid::from_bytes([1u8; 16]),
1491 instance_guid: Guid::from_bytes([1u8; 16]),
1492 start_block: 1,
1493 num_blocks: 2,
1494 flags: 0,
1495 }];
1496 runner
1497 .reset_partition_table(new_partitions)
1498 .await
1499 .expect_err("reset_partition_table should fail");
1500
1501 runner.shutdown().await;
1502 }
1503
1504 #[fuchsia::test]
1505 async fn reset_partition_tables_fails_if_partitions_overlap() {
1506 let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
1507 let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
1508 .await
1509 .expect("load should succeed");
1510 let new_partitions = vec![
1511 PartitionInfo {
1512 label: "a".to_string(),
1513 type_guid: Guid::from_bytes([1u8; 16]),
1514 instance_guid: Guid::from_bytes([1u8; 16]),
1515 start_block: 32,
1516 num_blocks: 2,
1517 flags: 0,
1518 },
1519 PartitionInfo {
1520 label: "b".to_string(),
1521 type_guid: Guid::from_bytes([2u8; 16]),
1522 instance_guid: Guid::from_bytes([2u8; 16]),
1523 start_block: 33,
1524 num_blocks: 1,
1525 flags: 0,
1526 },
1527 ];
1528 runner
1529 .reset_partition_table(new_partitions)
1530 .await
1531 .expect_err("reset_partition_table should fail");
1532
1533 runner.shutdown().await;
1534 }
1535
1536 #[fuchsia::test]
1537 async fn add_partition() {
1538 let (block_device, partitions_dir) = setup(512, 64, vec![PartitionInfo::nil(); 64]).await;
1539 let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
1540 .await
1541 .expect("load should succeed");
1542
1543 let transaction = runner.create_transaction().await.expect("Create transaction failed");
1544 let request = fpartitions::PartitionsManagerAddPartitionRequest {
1545 transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
1546 name: Some("a".to_string()),
1547 type_guid: Some(fblock::Guid { value: [1u8; 16] }),
1548 num_blocks: Some(2),
1549 ..Default::default()
1550 };
1551 runner.add_partition(request).await.expect("add_partition failed");
1552 runner.commit_transaction(transaction).await.expect("add_partition failed");
1553
1554 let proxy = vfs::serve_directory(
1555 partitions_dir.clone(),
1556 vfs::path::Path::validate_and_split("part-000").unwrap(),
1557 fio::PERM_READABLE,
1558 );
1559 let block = connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&proxy, "volume")
1560 .expect("Failed to open block service");
1561 let client: RemoteBlockClient =
1562 RemoteBlockClient::new(block).await.expect("Failed to create block client");
1563
1564 assert_eq!(client.block_count(), 2);
1565 assert_eq!(client.block_size(), 512);
1566
1567 runner.shutdown().await;
1568 }
1569
    #[fuchsia::test]
    async fn partition_info() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(16),
                block_size: 512,
                info: DeviceInfo::Block(BlockInfo {
                    max_transfer_blocks: NonZero::new(2),
                    device_flags: BlockDeviceFlag::READONLY
                        | BlockDeviceFlag::REMOVABLE
                        | BlockDeviceFlag::ZSTD_DECOMPRESSION_SUPPORT,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 1,
                flags: 0xabcd,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Volume service");
        // get_info reflects the partition's geometry plus the device-level flags
        // and transfer limits passed straight through from the backing device.
        let info: fblock::BlockInfo =
            part_block.get_info().await.expect("FIDL error").expect("get_info failed");
        assert_eq!(info.block_count, 1);
        assert_eq!(info.block_size, 512);
        assert_eq!(
            info.flags,
            BlockDeviceFlag::READONLY
                | BlockDeviceFlag::REMOVABLE
                | BlockDeviceFlag::ZSTD_DECOMPRESSION_SUPPORT
        );
        // max_transfer_blocks (2) * block_size (512) = 1024 bytes.
        assert_eq!(info.max_transfer_size, 1024);

        // get_metadata reflects the GPT entry itself.
        let metadata: fblock::BlockGetMetadataResponse =
            part_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
        assert_eq!(metadata.name, Some(PART_NAME.to_string()));
        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
        assert_eq!(metadata.instance_guid.unwrap().value, PART_INSTANCE_GUID);
        assert_eq!(metadata.start_block_offset, Some(4));
        assert_eq!(metadata.num_blocks, Some(1));
        assert_eq!(metadata.flags, Some(0xabcd));

        runner.shutdown().await;
    }
1636
    // Formats a GPT *inside* a partition of an outer GPT and verifies that I/O to
    // the inner partition lands at the correct absolute offset on the backing VMO.
    #[fuchsia::test]
    async fn nested_gpt() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        // Keep a handle to the raw VMO so the final on-disk contents can be checked.
        let vmo = zx::Vmo::create(64 * 512).unwrap();
        let vmo_clone = vmo.create_child(zx::VmoChildOptions::REFERENCE, 0, 0).unwrap();
        let (outer_block_device, outer_partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromVmo(vmo_clone),
                block_size: 512,
                info: DeviceInfo::Block(BlockInfo {
                    device_flags: BlockDeviceFlag::READONLY | BlockDeviceFlag::REMOVABLE,
                    ..Default::default()
                }),
                ..Default::default()
            },
            // Outer partition: device blocks 4..20.
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 16,
                flags: 0xabcd,
            }],
        )
        .await;

        let outer_partitions_dir_clone = outer_partitions_dir.clone();
        let outer_runner =
            GptManager::new(outer_block_device.connect(), outer_partitions_dir_clone)
                .await
                .expect("load should succeed");

        let outer_part_dir = vfs::serve_directory(
            outer_partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&outer_part_dir, "volume")
                .expect("Failed to open Block service");

        // Format a new GPT within the outer partition; its single inner partition
        // occupies block 5 (relative to the outer partition).
        let client = Arc::new(RemoteBlockClient::new(part_block.clone()).await.unwrap());
        let _ = gpt::Gpt::format(
            client,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 5,
                num_blocks: 1,
                flags: 0xabcd,
            }],
        )
        .await
        .unwrap();

        // Run a second GptManager directly on the outer partition's block service.
        let partitions_dir = vfs::directory::immutable::simple();
        let partitions_dir_clone = partitions_dir.clone();
        let runner =
            GptManager::new(part_block, partitions_dir_clone).await.expect("load should succeed");
        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let inner_part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Block service");

        let client =
            RemoteBlockClient::new(inner_part_block).await.expect("Failed to create block client");
        assert_eq!(client.block_count(), 1);
        assert_eq!(client.block_size(), 512);

        // Writes are clamped to the inner partition's single block.
        let buffer = vec![0xaa; 512];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();
        client
            .write_at(BufferSlice::Memory(&buffer), 512)
            .await
            .expect_err("Write past end should fail");
        client.flush().await.unwrap();

        runner.shutdown().await;
        outer_runner.shutdown().await;

        // Inner block 0 = inner start (5) + outer start (4) = absolute block 9.
        let data = vmo.read_to_vec::<u8>(9 * 512, 512).unwrap();
        assert_eq!(&data[..], &buffer[..]);
    }
1729
    // Verifies that sessions opened with an offset map cannot be used to address
    // blocks beyond the partition's bounds.
    #[fuchsia::test]
    async fn offset_map_does_not_allow_partition_overwrite() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = FVM_PARTITION_LABEL;

        // Single 2-block partition (device blocks 4..6).
        let (block_device, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(16),
                block_size: 512,
                info: DeviceInfo::Block(BlockInfo {
                    device_flags: fblock::DeviceFlag::READONLY | fblock::DeviceFlag::REMOVABLE,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 2,
                flags: 0xabcd,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );

        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Block service");

        // Mapping source 0 -> target 1 with length 2 would reach one block past
        // the 2-block partition; the server should reject it by closing the session.
        let (session, server_end) = fidl::endpoints::create_proxy::<fblock::SessionMarker>();
        part_block
            .open_session_with_offset_map(
                server_end,
                &fblock::BlockOffsetMapping {
                    source_block_offset: 0,
                    target_block_offset: 1,
                    length: 2,
                },
            )
            .expect("FIDL error");
        session.get_fifo().await.expect_err("Session should be closed");

        // Length 3 exceeds the partition's 2 blocks even at target offset 0.
        let (session, server_end) = fidl::endpoints::create_proxy::<fblock::SessionMarker>();
        part_block
            .open_session_with_offset_map(
                server_end,
                &fblock::BlockOffsetMapping {
                    source_block_offset: 0,
                    target_block_offset: 0,
                    length: 3,
                },
            )
            .expect("FIDL error");
        session.get_fifo().await.expect_err("Session should be closed");

        runner.shutdown().await;
    }
1802}