vmo_backed_block_server/vmo_backed_server.rs

// Copyright 2024 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use anyhow::{Error, anyhow};
use block_server::async_interface::{Interface, SessionManager};
use block_server::{BlockInfo, BlockServer, DeviceInfo, ReadOptions, WriteFlags, WriteOptions};
use fidl::endpoints::{ClientEnd, FromClient, RequestStream, ServerEnd, create_endpoints};
use fidl_fuchsia_hardware_inlineencryption::{DeviceMarker, DeviceRequest, DeviceRequestStream};
use fidl_fuchsia_storage_block as fblock;
use fs_management::filesystem::BlockConnector;
use fuchsia_sync::Mutex;
use futures::stream::StreamExt;
use fxfs_crypto::{FscryptSoftwareInoLblk32FileCipher, UnwrappedKey};
use rand::Rng as _;
use std::borrow::Cow;
use std::collections::BTreeMap;
use std::num::NonZero;
use std::sync::Arc;
use std::time::Duration;

/// The Observer can silently discard writes, or fail them explicitly (zx::Status::IO is returned).
pub enum WriteAction {
    Write,
    Discard,
    Fail,
}

pub trait Observer: Send + Sync {
    fn read(
        &self,
        _device_block_offset: u64,
        _block_count: u32,
        _vmo: &Arc<zx::Vmo>,
        _vmo_offset: u64,
    ) {
    }

    fn write(
        &self,
        _device_block_offset: u64,
        _block_count: u32,
        _vmo: &Arc<zx::Vmo>,
        _vmo_offset: u64,
        _opts: WriteOptions,
    ) -> WriteAction {
        WriteAction::Write
    }

    // If [`VmoBackedServerOptions::write_tracking`] is enabled, `writes` is set to the batch since
    // last flush or barrier and can be freely modified.
    fn flush(&self, _writes: Option<&mut WriteCache>) {}

    // If [`VmoBackedServerOptions::write_tracking`] is enabled, `writes` is set to the batch since
    // last flush or barrier and can be freely modified.
    fn close(&self, _writes: Option<&mut WriteCache>) {}

    fn trim(&self, _device_block_offset: u64, _block_count: u32) {}
}
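
// Illustrative sketch (not part of this module's API): an `Observer` can be used to inject write
// failures, e.g. to exercise error handling in a filesystem under test.  `FailingObserver` is a
// hypothetical name:
//
//     struct FailingObserver;
//
//     impl Observer for FailingObserver {
//         fn write(
//             &self,
//             device_block_offset: u64,
//             _block_count: u32,
//             _vmo: &Arc<zx::Vmo>,
//             _vmo_offset: u64,
//             _opts: WriteOptions,
//         ) -> WriteAction {
//             // Fail any write that touches block 0; all other writes proceed as normal.
//             if device_block_offset == 0 { WriteAction::Fail } else { WriteAction::Write }
//         }
//     }
//
// The observer is installed via the `observer` field of `VmoBackedServerOptions` below.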

/// A local server backed by a VMO.
pub struct VmoBackedServer {
    server: BlockServer<SessionManager<Data>>,
    // Maps keyslots to lblk32 software ciphers used to encrypt/decrypt file contents.
    fscrypt_keys: Arc<Mutex<BTreeMap<u8, FscryptSoftwareInoLblk32FileCipher>>>,
}

/// The initial contents of the VMO.  This also determines the size of the block device.
pub enum InitialContents<'a> {
    /// An empty VMO will be created with capacity for this many *blocks*.
    FromCapacity(u64),
    /// A VMO is created with capacity for this many *blocks* and the buffer's contents copied into
    /// it.
    FromCapacityAndBuffer(u64, &'a [u8]),
    /// A VMO is created which is exactly large enough for the initial contents (rounded up to block
    /// size), and the buffer's contents copied into it.
    FromBuffer(&'a [u8]),
    /// The provided VMO is used.  If its size is not block-aligned, the data will be truncated.
    FromVmo(zx::Vmo),
}

pub struct VmoBackedServerOptions<'a> {
    /// NB: `block_count` is ignored as that comes from `initial_contents`.
    pub info: DeviceInfo,
    pub block_size: u32,
    pub initial_contents: InitialContents<'a>,
    pub observer: Option<Box<dyn Observer>>,
    /// Enables write tracking so [`Observer::flush`] and [`Observer::close`] will be provided
    /// with a [`WriteCache`].
    /// Note that this is expensive and should mainly be used for tests.
    pub write_tracking: bool,
    /// If set, each operation will be delayed by a random duration <= this value, which is useful
    /// for testing race conditions due to out-of-order block requests.
    pub max_jitter_usec: Option<u64>,
}

impl Default for VmoBackedServerOptions<'_> {
    fn default() -> Self {
        VmoBackedServerOptions {
            info: DeviceInfo::Block(BlockInfo {
                device_flags: fblock::DeviceFlag::empty(),
                block_count: 0,
                max_transfer_blocks: None,
            }),
            block_size: 512,
            initial_contents: InitialContents::FromCapacity(0),
            observer: None,
            write_tracking: false,
            max_jitter_usec: None,
        }
    }
}

impl VmoBackedServerOptions<'_> {
    pub fn build(self) -> Result<VmoBackedServer, Error> {
        let (data, block_count) = match self.initial_contents {
            InitialContents::FromCapacity(block_count) => {
                (zx::Vmo::create(block_count * self.block_size as u64)?, block_count)
            }
            InitialContents::FromCapacityAndBuffer(block_count, buf) => {
                let needed =
                    buf.len()
                        .checked_next_multiple_of(self.block_size as usize)
                        .ok_or_else(|| anyhow!("Invalid buffer size"))? as u64
                        / self.block_size as u64;
                if needed > block_count {
                    return Err(anyhow!("Not enough capacity: {needed} vs {block_count}"));
                }
                let vmo = zx::Vmo::create(block_count * self.block_size as u64)?;
                vmo.write(buf, 0)?;
                (vmo, block_count)
            }
            InitialContents::FromBuffer(buf) => {
                let block_count =
                    buf.len()
                        .checked_next_multiple_of(self.block_size as usize)
                        .ok_or_else(|| anyhow!("Invalid buffer size"))? as u64
                        / self.block_size as u64;
                let vmo = zx::Vmo::create(block_count * self.block_size as u64)?;
                vmo.write(buf, 0)?;
                (vmo, block_count)
            }
            InitialContents::FromVmo(vmo) => {
                let size = vmo.get_size()?;
                let block_count = size / self.block_size as u64;
                (vmo, block_count)
            }
        };

        let info = match self.info {
            DeviceInfo::Block(mut info) => {
                info.block_count = block_count;
                DeviceInfo::Block(info)
            }
            DeviceInfo::Partition(mut info) => {
                info.block_range = Some(0..block_count);
                DeviceInfo::Partition(info)
            }
        };
        let fscrypt_keys = Arc::new(Mutex::new(BTreeMap::new()));
        Ok(VmoBackedServer {
            server: BlockServer::new(
                self.block_size,
                Arc::new(Data {
                    info,
                    block_size: self.block_size,
                    data,
                    observer: self.observer,
                    write_cache: if self.write_tracking {
                        Some(Mutex::new(WriteCache::new(self.block_size as u64)))
                    } else {
                        None
                    },
                    fscrypt_keys: fscrypt_keys.clone(),
                    max_jitter_usec: self.max_jitter_usec,
                }),
            ),
            fscrypt_keys,
        })
    }
}
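
// A minimal usage sketch, assuming the default `DeviceInfo` is acceptable: build a server backed
// by an empty VMO with room for 1024 blocks of 4 KiB each (the block count and size are
// illustrative).
//
//     let server = VmoBackedServerOptions {
//         block_size: 4096,
//         initial_contents: InitialContents::FromCapacity(1024),
//         ..Default::default()
//     }
//     .build()?;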

impl VmoBackedServer {
    /// Handles `requests`.  The future will resolve when the stream terminates.
    pub async fn serve(&self, requests: fblock::BlockRequestStream) -> Result<(), Error> {
        let res = self.server.handle_requests(requests).await;
        self.server.session_manager().interface().client_closed()?;
        res
    }

    /// Implements a software fallback for fuchsia_hardware_inlineencryption.ProgramKey. There is a
    /// maximum of 256 keyslots; the key is inserted at the next available slot.
    ///
    /// *WARNING*: This is only intended for testing and is not considered secure.
    fn program_key(&self, xts_key: &[u8; 64]) -> Result<u8, zx::Status> {
        let unwrapped_key = UnwrappedKey::new(xts_key.to_vec());
        let cipher = FscryptSoftwareInoLblk32FileCipher::new(&unwrapped_key);
        let mut fscrypt_keys = self.fscrypt_keys.lock();
        // Find the first keyslot that is not in use and use it.
        for slot in 0..=u8::MAX {
            if fscrypt_keys.contains_key(&slot) {
                continue;
            }
            fscrypt_keys.insert(slot, cipher);
            return Ok(slot);
        }
        Err(zx::Status::NO_RESOURCES)
    }

    pub async fn serve_insecure_inline_encryption(
        self: Arc<Self>,
        mut requests: DeviceRequestStream,
        uuid: [u8; 16],
    ) {
        while let Some(Ok(request)) = requests.next().await {
            match request {
                DeviceRequest::ProgramKey { wrapped_key, data_unit_size: _, responder } => {
                    responder
                        .send(
                            self.program_key(&fscrypt::to_xts_key(&wrapped_key, uuid))
                                .map_err(zx::Status::into_raw),
                        )
                        .unwrap_or_else(|e| {
                            log::error!("failed to send ProgramKey response. error: {:?}", e);
                        });
                }
                DeviceRequest::DeriveRawSecret { mut wrapped_key, responder } => {
                    // Swap the nibbles.
                    for b in &mut wrapped_key {
                        *b = *b >> 4 | *b << 4;
                    }
                    responder.send(Ok(&wrapped_key)).unwrap();
                }
            }
        }
    }
}

/// Implements `BlockConnector` to vend connections to a VmoBackedServer.
pub struct VmoBackedServerConnector {
    scope: fuchsia_async::Scope,
    server: Arc<VmoBackedServer>,
}

impl VmoBackedServerConnector {
    pub fn new(scope: fuchsia_async::Scope, server: Arc<VmoBackedServer>) -> Self {
        Self { scope, server }
    }
}

impl BlockConnector for VmoBackedServerConnector {
    fn connect_channel_to_block(
        &self,
        server_end: ServerEnd<fblock::BlockMarker>,
    ) -> Result<(), Error> {
        let server = self.server.clone();
        let _ = self.scope.spawn(async move {
            let _ = server.serve(server_end.into_stream()).await;
        });
        Ok(())
    }
}
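
// Illustrative sketch: wrapping a server so it can be handed to code that takes a `BlockConnector`
// (the trait comes from `fs_management`).  The scope owns the tasks spawned for each connection.
//
//     let scope = fuchsia_async::Scope::new();
//     let connector = VmoBackedServerConnector::new(scope, Arc::new(server));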

/// Keeps track of a sequence of writes since the last flush or barrier, and allows them to be
/// arbitrarily modified or re-ordered.
pub struct WriteCache {
    block_size: u64,
    block_offsets: Vec<u64>,
    buffer: Vec<u8>,
}

impl WriteCache {
    fn new(block_size: u64) -> Self {
        Self { block_size, block_offsets: vec![], buffer: vec![] }
    }

    fn insert(&mut self, block_offset: u64, contents: &[u8]) {
        let block_count = contents.len() as u64 / self.block_size;
        let mut buf_offset = 0;
        for offset in block_offset..block_offset + block_count {
            self.block_offsets.push(offset);
            self.buffer
                .extend_from_slice(&contents[buf_offset..buf_offset + self.block_size as usize]);
            buf_offset += self.block_size as usize;
        }
    }

    // Reads the last written value, falling back to `data` if there are no local updates.
    fn read(
        &self,
        data: &zx::Vmo,
        block_offset: u64,
        contents: &mut [u8],
    ) -> Result<(), zx::Status> {
        let block_count = contents.len() as u64 / self.block_size;
        let max_offset = block_offset + block_count;
        data.read(contents, block_offset * self.block_size)?;
        // Apply any buffered writes that would overwrite the actual contents.  If the same offset
        // shows up multiple times, we want to use the most recent write, so it's important to
        // iterate in order.
        for (idx, offset) in self.block_offsets.iter().enumerate() {
            if *offset >= block_offset && *offset < max_offset {
                let in_offset = idx * self.block_size as usize;
                let out_offset = ((*offset - block_offset) * self.block_size) as usize;
                contents[out_offset..out_offset + self.block_size as usize]
                    .copy_from_slice(&self.buffer[in_offset..in_offset + self.block_size as usize]);
            }
        }
        Ok(())
    }

    // Persists all writes to `data` and empties the cache.
    fn apply(&mut self, data: &zx::Vmo) -> Result<(), zx::Status> {
        let mut buf_offset = 0;
        for offset in self.block_offsets.drain(..) {
            data.write(
                &self.buffer[buf_offset..buf_offset + self.block_size as usize],
                offset * self.block_size,
            )?;
            buf_offset += self.block_size as usize;
        }
        self.buffer.clear();
        Ok(())
    }

    /// Returns the number of writes in the batch.
    pub fn len(&self) -> usize {
        self.block_offsets.len()
    }

    /// Returns an iterator over the batch of writes (in temporal sequence).
    pub fn iter(&self) -> impl Iterator<Item = (&u64, &[u8])> {
        // Each entry in `block_offsets` corresponds to one block-sized chunk of `buffer`.
        self.block_offsets.iter().zip(self.buffer.chunks_exact(self.block_size as usize))
    }

    fn swap_writes(&mut self, i: usize, j: usize) {
        self.block_offsets.swap(i, j);
        let bs = self.block_size as usize;
        let mut buf = vec![0u8; bs];
        buf.copy_from_slice(&self.buffer[i * bs..(i + 1) * bs]);
        self.buffer.copy_within(j * bs..(j + 1) * bs, i * bs);
        self.buffer[j * bs..(j + 1) * bs].copy_from_slice(&buf[..]);
    }

    /// Reorders all writes.
    pub fn shuffle(&mut self) {
        // Implements the Fisher–Yates shuffle.
        let mut rng = rand::rng();
        for i in 0..self.block_offsets.len() {
            let j = rng.random_range(0..=i);
            if i != j {
                self.swap_writes(i, j);
            }
        }
    }

    /// Discards a random number of writes from the tail of the batch by overwriting their contents
    /// with a fill pattern, simulating a power-cut.
    pub fn discard_some(&mut self) {
        let mut rng = rand::rng();
        let idx = rng.random_range(0..=self.block_offsets.len());
        for i in idx..self.block_offsets.len() {
            self.buffer[i * self.block_size as usize..(i + 1) * self.block_size as usize]
                .fill(0xab);
        }
    }
}
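
// Illustrative sketch: with [`VmoBackedServerOptions::write_tracking`] enabled, an `Observer` can
// use the `WriteCache` passed to `flush` to simulate a power-cut by reordering the batch and then
// corrupting its tail.  `PowerCutObserver` is a hypothetical name:
//
//     struct PowerCutObserver;
//
//     impl Observer for PowerCutObserver {
//         fn flush(&self, writes: Option<&mut WriteCache>) {
//             if let Some(writes) = writes {
//                 writes.shuffle();
//                 writes.discard_some();
//             }
//         }
//     }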

/// Extension trait for test-only functionality.  `unwrap` is used liberally in these functions to
/// simplify their usage in tests.
pub trait VmoBackedServerTestingExt {
    fn new(block_count: u64, block_size: u32, initial_content: &[u8]) -> Self;
    fn from_vmo(block_size: u32, vmo: zx::Vmo) -> Self;
    fn connect_server(self: &Arc<Self>, server: ServerEnd<fblock::BlockMarker>);
    fn connect<R: BlockClient>(self: &Arc<Self>) -> R;
    fn connect_insecure_inline_encryption_server(
        self: &Arc<Self>,
        server: ServerEnd<DeviceMarker>,
        uuid: [u8; 16],
    ) -> impl Future<Output = ()> + Send;
    /// Evicts the key slot from `fscrypt_keys`.
    fn evict_key_slot(&self, slot: u8) -> Result<(), zx::Status>;
}

pub trait BlockClient: FromClient {}

impl BlockClient for fblock::BlockProxy {}
impl BlockClient for fblock::BlockSynchronousProxy {}
impl BlockClient for ClientEnd<fblock::BlockMarker> {}

impl VmoBackedServerTestingExt for VmoBackedServer {
    fn new(block_count: u64, block_size: u32, initial_content: &[u8]) -> Self {
        VmoBackedServerOptions {
            block_size,
            initial_contents: InitialContents::FromCapacityAndBuffer(block_count, initial_content),
            ..Default::default()
        }
        .build()
        .unwrap()
    }

    fn from_vmo(block_size: u32, vmo: zx::Vmo) -> Self {
        VmoBackedServerOptions {
            block_size,
            initial_contents: InitialContents::FromVmo(vmo),
            ..Default::default()
        }
        .build()
        .unwrap()
    }

    fn connect<R: BlockClient>(self: &Arc<Self>) -> R {
        let (client, server) = create_endpoints::<R::Protocol>();
        let this = self.clone();
        fuchsia_async::Task::spawn(async move {
            let _ = this.serve(server.into_stream().cast_stream()).await;
        })
        .detach();
        R::from_client(client)
    }

    fn connect_server(self: &Arc<Self>, server: ServerEnd<fblock::BlockMarker>) {
        let this = self.clone();
        fuchsia_async::Task::spawn(async move {
            let _ = this.serve(server.into_stream()).await;
        })
        .detach();
    }

    fn connect_insecure_inline_encryption_server(
        self: &Arc<Self>,
        server: ServerEnd<DeviceMarker>,
        uuid: [u8; 16],
    ) -> impl Future<Output = ()> + Send {
        let this = self.clone();
        async move {
            this.serve_insecure_inline_encryption(server.into_stream(), uuid).await;
        }
    }

    /// Evicts the key slot for software ciphers.
    fn evict_key_slot(&self, slot: u8) -> Result<(), zx::Status> {
        let mut fscrypt_keys = self.fscrypt_keys.lock();
        match fscrypt_keys.remove(&slot) {
            Some(_) => Ok(()),
            None => Err(zx::Status::INVALID_ARGS),
        }
    }
}
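
// Illustrative usage in a test, with `VmoBackedServerTestingExt` in scope (sizes are arbitrary):
//
//     let server = Arc::new(VmoBackedServer::new(/* block_count */ 1024, /* block_size */ 512, &[]));
//     let proxy: fblock::BlockProxy = server.connect();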

struct Data {
    info: DeviceInfo,
    block_size: u32,
    data: zx::Vmo,
    observer: Option<Box<dyn Observer>>,
    write_cache: Option<Mutex<WriteCache>>,
    fscrypt_keys: Arc<Mutex<BTreeMap<u8, FscryptSoftwareInoLblk32FileCipher>>>,
    max_jitter_usec: Option<u64>,
}

impl Data {
    fn jitter(&self) -> Option<fuchsia_async::Timer> {
        self.max_jitter_usec
            .map(|max| fuchsia_async::Timer::new(Duration::from_micros(rand::random_range(0..max))))
    }

    fn client_closed(&self) -> Result<(), zx::Status> {
        if let Some(mut cache) = self.write_cache.as_ref().map(|w| w.lock()) {
            if let Some(observer) = self.observer.as_ref() {
                observer.close(Some(&mut *cache));
            }
            cache.apply(&self.data)
        } else {
            if let Some(observer) = self.observer.as_ref() {
                observer.close(None);
            }
            Ok(())
        }
    }
}

impl Interface for Data {
    fn get_info(&self) -> Cow<'_, DeviceInfo> {
        Cow::Borrowed(&self.info)
    }

    async fn read(
        &self,
        device_block_offset: u64,
        block_count: u32,
        vmo: &Arc<zx::Vmo>,
        vmo_offset: u64,
        opts: ReadOptions,
        _trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        if let Some(jitter) = self.jitter() {
            jitter.await;
        }
        if let Some(observer) = self.observer.as_ref() {
            observer.read(device_block_offset, block_count, vmo, vmo_offset);
        }
        if let Some(max) = self.info.max_transfer_blocks().as_ref() {
            // Requests should be split up by the core library
            assert!(block_count <= max.get());
        }
        if device_block_offset + block_count as u64 > self.info.block_count().unwrap() {
            Err(zx::Status::OUT_OF_RANGE)
        } else {
            let mut data = if let Some(tracking) = self.write_cache.as_ref() {
                let mut data = vec![0u8; block_count as usize * self.block_size as usize];
                tracking.lock().read(&self.data, device_block_offset, &mut data[..])?;
                data
            } else {
                self.data.read_to_vec(
                    device_block_offset * self.block_size as u64,
                    block_count as u64 * self.block_size as u64,
                )?
            };

            if opts.inline_crypto.is_enabled {
                let fscrypt_keys = self.fscrypt_keys.lock();
                if let Some(cipher) = fscrypt_keys.get(&opts.inline_crypto.slot) {
                    cipher
                        .decrypt(&mut data, opts.inline_crypto.dun as u128)
                        .map_err(|_| zx::Status::IO)?;
                } else {
                    return Err(zx::Status::IO);
                }
            }
            vmo.write(&data[..], vmo_offset)
        }
    }

    async fn write(
        &self,
        device_block_offset: u64,
        block_count: u32,
        vmo: &Arc<zx::Vmo>,
        vmo_offset: u64,
        opts: WriteOptions,
        _trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        if let Some(jitter) = self.jitter() {
            jitter.await;
        }
        if let Some(observer) = self.observer.as_ref() {
            match observer.write(device_block_offset, block_count, vmo, vmo_offset, opts) {
                WriteAction::Write => {}
                WriteAction::Discard => return Ok(()),
                WriteAction::Fail => return Err(zx::Status::IO),
            }
        }
        if opts.flags.contains(WriteFlags::PRE_BARRIER) {
            if let Some(cache) = self.write_cache.as_ref() {
                cache.lock().apply(&self.data)?;
            }
        }
        if let Some(max) = self.info.max_transfer_blocks().as_ref() {
            // Requests should be split up by the core library
            assert!(block_count <= max.get());
        }
        if device_block_offset + block_count as u64 > self.info.block_count().unwrap() {
            Err(zx::Status::OUT_OF_RANGE)
        } else {
            let mut data =
                vmo.read_to_vec(vmo_offset, block_count as u64 * self.block_size as u64)?;
            if !opts.flags.contains(WriteFlags::FORCE_ACCESS)
                && let Some(tracking) = self.write_cache.as_ref()
            {
                tracking.lock().insert(device_block_offset, &data[..]);
            }
            if opts.inline_crypto.is_enabled {
                let fscrypt_keys = self.fscrypt_keys.lock();
                if let Some(cipher) = fscrypt_keys.get(&opts.inline_crypto.slot) {
                    cipher
                        .encrypt(&mut data, opts.inline_crypto.dun as u128)
                        .map_err(|_| zx::Status::IO)?;
                } else {
                    return Err(zx::Status::IO);
                }
            }
            self.data.write(&data[..], device_block_offset * self.block_size as u64)?;
            Ok(())
        }
    }

    async fn flush(&self, _trace_flow_id: Option<NonZero<u64>>) -> Result<(), zx::Status> {
        if let Some(jitter) = self.jitter() {
            jitter.await;
        }
        let mut cache = self.write_cache.as_ref().map(|w| w.lock());
        if let Some(observer) = self.observer.as_ref() {
            match cache.as_mut() {
                Some(w) => observer.flush(Some(&mut *w)),
                None => observer.flush(None),
            }
        }
        if let Some(w) = cache.as_mut() { w.apply(&self.data) } else { Ok(()) }
    }

    async fn trim(
        &self,
        device_block_offset: u64,
        block_count: u32,
        _trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        if let Some(jitter) = self.jitter() {
            jitter.await;
        }
        if let Some(observer) = self.observer.as_ref() {
            observer.trim(device_block_offset, block_count);
        }
        if device_block_offset + block_count as u64 > self.info.block_count().unwrap() {
            Err(zx::Status::OUT_OF_RANGE)
        } else {
            Ok(())
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use block_server::InlineCryptoOptions;

    #[fuchsia::test]
    async fn test_program_and_evict_key_slot() {
        let block_size = 4096;
        let server = Arc::new(VmoBackedServer::new(100, block_size, &[]));

        let key = [0xaa; 64];
        let slot = server.program_key(&key).expect("program_key failed");
        assert_eq!(slot, 0);

        // Use the internal interface to avoid FIDL complexity for this test.
        let block_interface = server.server.session_manager().interface();
        // Verify that we can write and read using the programmed key.
        let vmo = Arc::new(zx::Vmo::create(block_size as u64).expect("Vmo::create failed"));
        let original_data = vec![0xbb; block_size as usize];
        vmo.write(&original_data, 0).expect("Vmo::write failed");
        let write_opts = WriteOptions {
            inline_crypto: InlineCryptoOptions::enabled(slot, 0),
            ..Default::default()
        };
        block_interface.write(0, 1, &vmo, 0, write_opts, None).await.expect("write failed");

        // Verify we can read it back.
        let vmo_read = Arc::new(zx::Vmo::create(block_size as u64).expect("Vmo::create failed"));
        let read_opts = ReadOptions {
            inline_crypto: InlineCryptoOptions::enabled(slot, 0),
            ..Default::default()
        };
        block_interface.read(0, 1, &vmo_read, 0, read_opts, None).await.expect("read failed");
        let mut read_data = vec![0u8; block_size as usize];
        vmo_read.read(&mut read_data, 0).expect("Vmo::read failed");
        assert_eq!(read_data, original_data);

        server.evict_key_slot(slot).expect("evict_key_slot failed");
        assert_eq!(server.evict_key_slot(slot), Err(zx::Status::INVALID_ARGS));

        // Reads and writes after the key has been evicted should fail.
        assert_eq!(
            block_interface.read(0, 1, &vmo_read, 0, read_opts, None).await,
            Err(zx::Status::IO)
        );

        assert_eq!(
            block_interface.write(0, 1, &vmo, 0, write_opts, None).await,
            Err(zx::Status::IO)
        );
    }

    #[fuchsia::test]
    async fn test_program_key_out_of_slots() {
        let server = Arc::new(VmoBackedServer::new(100, 512, &[]));

        let key = [0xaa; 64];
        for expected_slot in 0..=u8::MAX {
            let slot = server.program_key(&key).expect("program_key failed");
            assert_eq!(slot, expected_slot);
        }
        assert_eq!(server.program_key(&key), Err(zx::Status::NO_RESOURCES));
    }
}