// fuchsia_repo/repo_builder.rs
// Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use crate::repo_keys::RepoKeys;
use crate::repository::RepoStorageProvider;
use anyhow::{Context, Result, anyhow};
use camino::{Utf8Path, Utf8PathBuf};
use chrono::{DateTime, Duration, Utc};
use delivery_blob::DeliveryBlobType;
use fuchsia_merkle::Hash;
use fuchsia_pkg::{BlobInfo, PackageManifest, PackageManifestList, PackagePath, SubpackageInfo};
use futures::stream::{StreamExt as _, TryStreamExt as _};
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet, hash_map};
use std::fs::{self, File};
use std::future::Future;
use std::os::unix::fs::MetadataExt;
use std::pin::Pin;
use tempfile::TempDir;
use tuf::Database;
use tuf::crypto::HashAlgorithm;
use tuf::metadata::TargetPath;
use tuf::pouf::Pouf1;
use tuf::repo_builder::RepoBuilder as TufRepoBuilder;

#[cfg(not(target_os = "fuchsia"))]
use crate::repo_client::RepoClient;
// Default TUF metadata expiration windows, in days, measured from the
// builder's `current_time`.

/// Number of days from now before the root metadata is expired.
const DEFAULT_ROOT_EXPIRATION: i64 = 365;

/// Number of days from now before the targets metadata is expired.
const DEFAULT_TARGETS_EXPIRATION: i64 = 90;

/// Number of days from now before the snapshot metadata is expired.
const DEFAULT_SNAPSHOT_EXPIRATION: i64 = 30;

/// Number of days from now before the timestamp metadata is expired.
const DEFAULT_TIMESTAMP_EXPIRATION: i64 = 30;

/// A package that has been queued for publishing but not yet committed.
#[derive(Debug)]
struct ToBeStagedPackage {
    /// Path the package was loaded from, if any. Used for dependency tracking
    /// and error reporting.
    manifest_path: Option<Utf8PathBuf>,
    /// Where the package came from, along with its parsed manifest.
    kind: ToBeStagedPackageKind,
}

/// The source a staged package was read from.
#[derive(Debug)]
enum ToBeStagedPackageKind {
    /// Staged directly from a package manifest.
    Manifest { manifest: PackageManifest },
    /// Extracted from a package archive. `_archive_out` keeps the temporary
    /// directory holding the extracted blobs alive until the staged package is
    /// dropped.
    Archive { _archive_out: TempDir, manifest: PackageManifest },
}
52
53impl ToBeStagedPackage {
54    fn manifest(&self) -> &PackageManifest {
55        match &self.kind {
56            ToBeStagedPackageKind::Manifest { manifest } => manifest,
57            ToBeStagedPackageKind::Archive { manifest, .. } => manifest,
58        }
59    }
60}
61
/// A blob queued for publishing, along with how it should be stored.
#[derive(Debug)]
struct StagedBlob {
    /// The blob's manifest entry (merkle, size, and source path).
    info: BlobInfo,
    /// If set, the source file is a delivery blob of this type rather than an
    /// uncompressed blob.
    delivery_blob_type: Option<DeliveryBlobType>,
}

/// Error returned when a blob's on-disk size disagrees with the size recorded
/// in its package manifest.
#[derive(Debug, thiserror::Error)]
#[error(
    "blob {hash} at {path:?} is {file_size} bytes in size, \
     but the package manifest indicates it should be {manifest_size} bytes in size"
)]
struct BlobSizeMismatchError {
    hash: Hash,
    path: String,
    manifest_size: u64,
    file_size: u64,
}

/// Error returned when two blobs with different merkle hashes turn out to be
/// the same hardlinked file on disk.
#[derive(Debug, thiserror::Error)]
#[error("Blobs {hash1} and {hash2} refer to the same hardlinked file")]
struct MerkleHardLinkMismatchError {
    hash1: Hash,
    hash2: Hash,
}
86
/// RepoBuilder can create and manipulate package repositories.
#[derive(Debug)]
pub struct RepoBuilder<'a, R: RepoStorageProvider> {
    /// Optional extra keys used to sign the generated metadata.
    signing_repo_keys: Option<&'a RepoKeys>,
    /// Keys trusted for this repository; always registered with the TUF builder.
    trusted_repo_keys: &'a RepoKeys,
    /// Existing trusted TUF metadata to extend, if any.
    database: Option<&'a Database<Pouf1>>,
    /// Storage backend the metadata and blobs are committed to.
    repo: R,
    /// If true, packages/blobs whose files are missing are silently skipped.
    ignore_missing_packages: bool,
    /// Time used when generating metadata; defaults to `Utc::now()`.
    current_time: DateTime<Utc>,
    /// Forwarded to the TUF builder's time-versioning option.
    time_versioning: bool,
    /// Force regeneration of all metadata, including root.
    refresh_metadata: bool,
    /// Force regeneration of targets/snapshot/timestamp metadata only.
    refresh_non_root_metadata: bool,
    /// Whether new targets metadata inherits from the trusted targets metadata.
    inherit_from_trusted_targets: bool,
    /// Top-level packages to publish, keyed by package path.
    named_packages: HashMap<PackagePath, Hash>,
    /// All staged packages (including subpackages), keyed by package hash.
    staged_packages: HashMap<Hash, ToBeStagedPackage>,
    /// All staged blobs, keyed by merkle hash.
    staged_blobs: HashMap<Hash, StagedBlob>,
    /// Every file read while staging, returned to the caller on commit.
    deps: HashSet<Utf8PathBuf>,
}
105
// Only available when not compiling for Fuchsia itself (i.e. host-side tools).
#[cfg(not(target_os = "fuchsia"))]
impl<'a, R> RepoBuilder<'a, &'a R>
where
    R: RepoStorageProvider,
{
    /// Create a builder that extends the repository behind `client`, trusting
    /// the client's current TUF metadata database.
    pub fn from_client(
        client: &'a RepoClient<R>,
        repo_keys: &'a RepoKeys,
    ) -> RepoBuilder<'a, &'a R> {
        Self::from_database(client.remote_repo(), repo_keys, client.database())
    }
}
118
119impl<'a, R> RepoBuilder<'a, R>
120where
121    R: RepoStorageProvider,
122{
    /// Create a builder for a brand-new repository, with no existing trusted
    /// TUF metadata database.
    pub fn create(repo: R, repo_keys: &'a RepoKeys) -> RepoBuilder<'a, R> {
        Self::new(repo, repo_keys, None)
    }

    /// Create a builder that extends an existing repository, using `database`
    /// as the trusted TUF metadata to build upon.
    pub fn from_database(
        repo: R,
        repo_keys: &'a RepoKeys,
        database: &'a Database<Pouf1>,
    ) -> RepoBuilder<'a, R> {
        Self::new(repo, repo_keys, Some(database))
    }
134
    /// Shared constructor. `database` is `Some` when extending an existing
    /// repository and `None` when creating one from scratch. All toggles start
    /// at their defaults; see the builder setters below.
    fn new(
        repo: R,
        trusted_repo_keys: &'a RepoKeys,
        database: Option<&'a Database<Pouf1>>,
    ) -> RepoBuilder<'a, R> {
        RepoBuilder {
            repo,
            signing_repo_keys: None,
            trusted_repo_keys,
            database,
            ignore_missing_packages: false,
            current_time: Utc::now(),
            time_versioning: false,
            refresh_metadata: false,
            refresh_non_root_metadata: false,
            // By default, keep targets/delegations from the trusted metadata.
            inherit_from_trusted_targets: true,
            named_packages: HashMap::new(),
            staged_packages: HashMap::new(),
            staged_blobs: HashMap::new(),
            deps: HashSet::new(),
        }
    }
157
    /// Set extra keys that will be used to sign the generated metadata; they
    /// are registered with the TUF builder's `signing_*_keys` at commit time.
    pub fn signing_repo_keys(mut self, signing_repo_keys: &'a RepoKeys) -> Self {
        self.signing_repo_keys = Some(signing_repo_keys);
        self
    }

    /// Override the time used when generating metadata. Defaults to
    /// `Utc::now()` at builder construction.
    pub fn current_time(mut self, current_time: DateTime<Utc>) -> Self {
        self.current_time = current_time;
        self
    }

    /// Enable or disable time-based metadata versioning (forwarded to the TUF
    /// builder at commit time).
    pub fn time_versioning(mut self, time_versioning: bool) -> Self {
        self.time_versioning = time_versioning;
        self
    }

    /// Always generate new root, targets, snapshot, and timestamp metadata, even if unchanged and
    /// not expired.
    pub fn refresh_metadata(mut self, refresh_metadata: bool) -> Self {
        self.refresh_metadata = refresh_metadata;
        self
    }

    /// Generate a new targets, snapshot, and timestamp metadata, even if unchanged and not expired.
    pub fn refresh_non_root_metadata(mut self, refresh_non_root_metadata: bool) -> Self {
        self.refresh_non_root_metadata = refresh_non_root_metadata;
        self
    }

    /// Whether or not the new targets metadata inherits targets and delegations from the trusted
    /// targets metadata.
    ///
    /// Default is `true`.
    pub fn inherit_from_trusted_targets(mut self, inherit_from_trusted_targets: bool) -> Self {
        self.inherit_from_trusted_targets = inherit_from_trusted_targets;
        self
    }

    /// Whether or not to raise an error if package does not exist.
    pub fn ignore_missing_packages(mut self, ignore_missing_packages: bool) -> Self {
        self.ignore_missing_packages = ignore_missing_packages;
        self
    }
200
201    /// Stage a package manifest from the `path` to be published.
202    pub async fn add_package(self, path: Utf8PathBuf) -> Result<RepoBuilder<'a, R>> {
203        match fs::read(path.as_std_path()) {
204            Ok(contents) => {
205                let package = PackageManifest::from_reader(&path, &contents[..])
206                    .with_context(|| format!("reading package manifest {path}"))?;
207
208                self.add_package_manifest(Some(path), package).await
209            }
210            Err(err) => {
211                if self.ignore_missing_packages && err.kind() == std::io::ErrorKind::NotFound {
212                    Ok(self)
213                } else {
214                    Err(err).with_context(|| format!("reading package manifest {path}"))
215                }
216            }
217        }
218    }
219
220    /// Stage the package manifests from the iterator of paths to be published.
221    pub async fn add_packages(
222        mut self,
223        paths: impl Iterator<Item = Utf8PathBuf>,
224    ) -> Result<RepoBuilder<'a, R>> {
225        for path in paths {
226            self = self.add_package(path).await?;
227        }
228        Ok(self)
229    }
230
231    /// Stage a package manifest, which was optionally loaded from `path`, to be published.
232    pub async fn add_package_manifest(
233        self,
234        path: Option<Utf8PathBuf>,
235        manifest: PackageManifest,
236    ) -> Result<RepoBuilder<'a, R>> {
237        self.stage_named_package(
238            manifest.package_path(),
239            ToBeStagedPackage {
240                manifest_path: path,
241                kind: ToBeStagedPackageKind::Manifest { manifest },
242            },
243        )
244        .await
245    }
246
247    /// Stage all the top-level package manifests from `iter` to be published.
248    pub async fn add_package_manifests(
249        mut self,
250        iter: impl Iterator<Item = (Option<Utf8PathBuf>, PackageManifest)>,
251    ) -> Result<RepoBuilder<'a, R>> {
252        for (path, package) in iter {
253            self = self.add_package_manifest(path, package).await?;
254        }
255        Ok(self)
256    }
257
258    /// Stage a package archive from the `path` to be published.
259    pub async fn add_package_archive(self, path: Utf8PathBuf) -> Result<RepoBuilder<'a, R>> {
260        if self.ignore_missing_packages && !path.exists() {
261            return Ok(self);
262        }
263
264        let blobs_out = TempDir::new().unwrap();
265        let manifest_out = TempDir::new().unwrap();
266        let manifest = PackageManifest::from_archive(
267            path.as_std_path(),
268            blobs_out.path(),
269            manifest_out.path(),
270        )
271        .with_context(|| format!("reading package archive {path}"))
272        .expect("archive to manifest");
273
274        self.stage_named_package(
275            manifest.package_path(),
276            ToBeStagedPackage {
277                manifest_path: Some(path),
278                kind: ToBeStagedPackageKind::Archive { _archive_out: blobs_out, manifest },
279            },
280        )
281        .await
282    }
283
284    /// Stage the package archives from the iterator of paths to be published.
285    pub async fn add_package_archives(
286        mut self,
287        paths: impl Iterator<Item = Utf8PathBuf>,
288    ) -> Result<RepoBuilder<'a, R>> {
289        for path in paths {
290            self = self.add_package_archive(path).await?;
291        }
292        Ok(self)
293    }
294
    /// Stage a top-level package manifest described by `package` from the
    /// `path` to be published. Duplicates are ignored unless registering two
    /// packages with the same package path and different package hashes.
    async fn stage_named_package(
        mut self,
        package_path: PackagePath,
        package: ToBeStagedPackage,
    ) -> Result<RepoBuilder<'a, R>> {
        let package_hash = package.manifest().hash();

        // We don't want to stage any blobs if we have `ignore_missing_files ==
        // true`, and a package or subpackage is missing, so we'll track that
        // separately before we decide to commit to stage this package.
        let mut staged_blobs = HashMap::new();
        let mut staged_packages = HashMap::new();
        let mut deps = HashSet::new();

        let did_stage_package = self
            .stage_package(package, &mut staged_packages, &mut staged_blobs, &mut deps)
            .await
            .with_context(|| format!("staging package path '{package_path}'"))?;

        // Exit early if we did not stage the package.
        if !did_stage_package {
            return Ok(self);
        }

        // Since we successfully processed this package, merge in all the
        // package hashes and blobs.
        self.staged_packages.extend(staged_packages);
        self.staged_blobs.extend(staged_blobs);
        self.deps.extend(deps);

        match self.named_packages.entry(package_path) {
            hash_map::Entry::Vacant(entry) => {
                entry.insert(package_hash);
            }
            hash_map::Entry::Occupied(entry) => {
                // A package with this path was staged earlier. Both hashes are
                // present in `staged_packages` at this point (the old one from
                // the earlier staging, the new one merged in just above), so
                // these lookups cannot fail.
                let old_package = self.staged_packages.get(entry.get()).unwrap();
                let new_package = self.staged_packages.get(&package_hash).unwrap();

                // Error out (with a diff-style explanation) if the two
                // manifests actually conflict.
                check_manifests_are_equivalent(old_package, new_package)
                    .with_context(|| format!("staging package path '{}'", entry.key()))?;
            }
        }

        Ok(self)
    }
343
    /// Try to stage a package and the blobs inside of it for publishing to the
    /// package repository.
    ///
    /// The behavior of this function depends on the `ignore_missing_packages`
    /// setting. If it is true, this will exit early and not stage any files. If
    /// false, this will return an error.
    ///
    /// This returns `true` if the package was staged, or `false` if any of the
    /// package contents were missing.
    async fn stage_package(
        &self,
        package: ToBeStagedPackage,
        to_be_staged_packages: &mut HashMap<Hash, ToBeStagedPackage>,
        to_be_staged_blobs: &mut HashMap<Hash, StagedBlob>,
        deps: &mut HashSet<Utf8PathBuf>,
    ) -> Result<bool> {
        // Record the manifest file itself as a dependency, if it came from disk.
        if let Some(path) = &package.manifest_path {
            deps.insert(Utf8PathBuf::from(path));
        }

        let package_hash = package.manifest().hash();

        // We'll only add the package manifest if we haven't already staged a manifest for this hash.
        if self.staged_packages.contains_key(&package_hash)
            || to_be_staged_packages.contains_key(&package_hash)
        {
            return Ok(true);
        }

        // Determine the set of blobs in the package that are unique.
        // NOTE(review): `&&` short-circuits, so a repeated source path skips
        // the merkle insert — assumes manifests don't map one path to multiple
        // merkles; confirm if that invariant holds upstream.
        let mut unique_package_blobs = vec![];
        let package_blobs = package.manifest().blobs();
        let mut seen_paths = HashSet::new();
        let mut seen_hashes = HashSet::new();
        for blob in package_blobs {
            if seen_paths.insert(&blob.source_path) && seen_hashes.insert(&blob.merkle) {
                unique_package_blobs.push(blob.to_owned());
            }
        }

        // Rust doesn't know we've fully processed the stream, so we need to
        // explicitly drop it so it knows we're done borrowing values.
        let blobs = {
            // Reborrow immutably: the stream closures only need to read the map.
            let to_be_staged_blobs = &*to_be_staged_blobs;

            // Iterate over the blobs in parallel and check if they exist, ignoring
            // any that we've already staged.

            let stream = futures::stream::iter(unique_package_blobs.iter())
                .filter_map(|blob| async move {
                    if self.staged_blobs.contains_key(&blob.merkle)
                        || to_be_staged_blobs.contains_key(&blob.merkle)
                    {
                        None
                    } else {
                        Some(async move {
                            let result = fs::metadata(&blob.source_path);
                            (blob, result)
                        })
                    }
                })
                .buffer_unordered(std::thread::available_parallelism()?.get());

            // Gather up the results. If `ignore_missing_files` is true and any of
            // the files are missing, exit early. Otherwise error out.
            let mut blobs = vec![];
            // Maps (device, inode) -> merkle so hardlinked duplicates are
            // detected and merkle/file mismatches are reported.
            let mut seen_links = HashMap::new();

            let mut stream = std::pin::pin!(stream);

            while let Some((blob, result)) = stream.next().await {
                match result {
                    Ok(metadata) => {
                        // Skip over any hardlinks we've already seen.
                        if let Some(first_merkle) =
                            seen_links.insert((metadata.dev(), metadata.ino()), blob.merkle)
                        {
                            if first_merkle == blob.merkle {
                                continue;
                            } else {
                                // Two different merkles claim the same file:
                                // at least one of them must be wrong.
                                return Err(anyhow!(MerkleHardLinkMismatchError {
                                    hash1: first_merkle,
                                    hash2: blob.merkle
                                }));
                            }
                        }

                        // For uncompressed blobs the on-disk size must match
                        // the manifest; delivery blobs are compressed, so the
                        // sizes legitimately differ and the check is skipped.
                        if package.manifest().delivery_blob_type().is_none()
                            && blob.size != metadata.len()
                        {
                            if self.ignore_missing_packages {
                                return Ok(false);
                            } else {
                                return Err(anyhow!(BlobSizeMismatchError {
                                    hash: blob.merkle,
                                    path: blob.source_path.clone(),
                                    manifest_size: blob.size,
                                    file_size: metadata.len(),
                                }));
                            }
                        }

                        blobs.push(blob);
                    }
                    Err(err) => {
                        if self.ignore_missing_packages
                            && err.kind() == std::io::ErrorKind::NotFound
                        {
                            return Ok(false);
                        } else {
                            return Err(err).with_context(|| {
                                format!("checking if {} exists", blob.source_path)
                            });
                        }
                    }
                }
            }

            blobs
        };

        // Merge our blobs in to be staged. We can't do the merge above because
        // the stream has an immutable reference to `to_be_staged_blobs` so it
        // can filter our already staged blobs.
        for blob in blobs {
            deps.insert(Utf8PathBuf::from(&blob.source_path));
            to_be_staged_blobs.insert(
                blob.merkle,
                StagedBlob {
                    info: blob.clone(),
                    delivery_blob_type: package.manifest().delivery_blob_type(),
                },
            );
        }

        // Stage all subpackages.
        for subpackage in package.manifest().subpackages() {
            // We only need to stage the subpackage if we haven't staged this merkle.
            if !self.staged_packages.contains_key(&subpackage.merkle)
                && !to_be_staged_packages.contains_key(&subpackage.merkle)
            {
                let manifest_path = Utf8PathBuf::from(&subpackage.manifest_path);

                // Don't stage the package if any subpackages are missing.
                if !self
                    .stage_subpackage(
                        manifest_path,
                        to_be_staged_packages,
                        to_be_staged_blobs,
                        deps,
                    )
                    .await?
                {
                    return Ok(false);
                }
            }
        }

        to_be_staged_packages.insert(package_hash, package);

        Ok(true)
    }
506
    /// Stage a subpackage's package manifest from the `path` to be published.
    ///
    /// Returns `Ok(false)` without staging anything when the manifest file is
    /// missing and `ignore_missing_packages` is set; any other read or parse
    /// failure is an error.
    async fn stage_subpackage(
        &self,
        path: Utf8PathBuf,
        to_be_staged_packages: &mut HashMap<Hash, ToBeStagedPackage>,
        to_be_staged_blobs: &mut HashMap<Hash, StagedBlob>,
        deps: &mut HashSet<Utf8PathBuf>,
    ) -> Result<bool> {
        let contents = match fs::read(path.as_std_path()) {
            Ok(contents) => contents,
            Err(err) => {
                if self.ignore_missing_packages && err.kind() == std::io::ErrorKind::NotFound {
                    return Ok(false);
                }

                return Err(err).with_context(|| format!("reading package manifest {path}"));
            }
        };

        let manifest = PackageManifest::from_reader(&path, &contents[..])
            .with_context(|| format!("parsing package manifest {path}"))?;

        // Stage the subpackage with `stage_package`, which stages its blobs
        // and recursively stages its own subpackages. We're recursing, so we
        // need to box our future.
        let fut: Pin<Box<dyn Future<Output = _>>> = Box::pin(self.stage_package(
            ToBeStagedPackage {
                manifest_path: Some(path),
                kind: ToBeStagedPackageKind::Manifest { manifest },
            },
            to_be_staged_packages,
            to_be_staged_blobs,
            deps,
        ));

        fut.await
    }
543
544    /// Stage all the packages pointed to by the package list to be published.
545    /// Paths in the package list file are relative to the directory that contains the package list.
546    pub async fn add_package_list(mut self, path: Utf8PathBuf) -> Result<RepoBuilder<'a, R>> {
547        let contents = fs::read(path.as_std_path())
548            .with_context(|| format!("reading package manifest list {path}"))?;
549
550        let package_list_manifest = PackageManifestList::from_reader(&path, &contents[..])
551            .with_context(|| format!("reading package manifest list {path}"))?;
552
553        self.deps.insert(path.clone());
554
555        self.add_packages(package_list_manifest.into_iter()).await
556    }
557
558    /// Stage all the packages pointed to by the iterator of package lists to be published.
559    pub async fn add_package_lists(
560        mut self,
561        paths: impl Iterator<Item = Utf8PathBuf>,
562    ) -> Result<RepoBuilder<'a, R>> {
563        for path in paths {
564            self = self.add_package_list(path).await?;
565        }
566
567        Ok(self)
568    }
569
    /// Read all remaining subpackages, and then commit the changes to the
    /// repository.
    ///
    /// This generates and publishes the TUF metadata (root, targets, snapshot,
    /// and timestamp), then stores every staged blob in the repository.
    ///
    /// Returns the list of the files that were read and the staged blobs.
    pub async fn commit(self) -> Result<(HashSet<Utf8PathBuf>, BTreeSet<BlobInfo>)> {
        // Extend the existing trusted metadata when we have it; otherwise
        // start a brand-new repository.
        let repo_builder = if let Some(database) = self.database.as_ref() {
            TufRepoBuilder::from_database(&self.repo, database)
        } else {
            TufRepoBuilder::create(&self.repo)
        };

        // Create a repo builder for the metadata, and initialize it with our repository keys.
        let mut repo_builder = repo_builder
            .current_time(self.current_time)
            .time_versioning(self.time_versioning)
            .root_expiration_duration(Duration::days(DEFAULT_ROOT_EXPIRATION))
            .targets_expiration_duration(Duration::days(DEFAULT_TARGETS_EXPIRATION))
            .snapshot_expiration_duration(Duration::days(DEFAULT_SNAPSHOT_EXPIRATION))
            .timestamp_expiration_duration(Duration::days(DEFAULT_TIMESTAMP_EXPIRATION));

        // Register any extra signing keys, one role at a time.
        if let Some(signing_repo_keys) = self.signing_repo_keys {
            for key in signing_repo_keys.root_keys() {
                repo_builder = repo_builder.signing_root_keys(&[&**key]);
            }

            for key in signing_repo_keys.targets_keys() {
                repo_builder = repo_builder.signing_targets_keys(&[&**key]);
            }

            for key in signing_repo_keys.snapshot_keys() {
                repo_builder = repo_builder.signing_snapshot_keys(&[&**key]);
            }

            for key in signing_repo_keys.timestamp_keys() {
                repo_builder = repo_builder.signing_timestamp_keys(&[&**key]);
            }
        }

        // Register the trusted keys for each metadata role.
        for key in self.trusted_repo_keys.root_keys() {
            repo_builder = repo_builder.trusted_root_keys(&[&**key]);
        }

        for key in self.trusted_repo_keys.targets_keys() {
            repo_builder = repo_builder.trusted_targets_keys(&[&**key]);
        }

        for key in self.trusted_repo_keys.snapshot_keys() {
            repo_builder = repo_builder.trusted_snapshot_keys(&[&**key]);
        }

        for key in self.trusted_repo_keys.timestamp_keys() {
            repo_builder = repo_builder.trusted_timestamp_keys(&[&**key]);
        }

        // We can't generate a new root if we don't have any root keys.
        let mut repo_builder = if self.trusted_repo_keys.root_keys().is_empty() {
            repo_builder.skip_root()
        } else if self.refresh_metadata {
            repo_builder.stage_root()?
        } else {
            repo_builder.stage_root_if_necessary()?
        };

        repo_builder = repo_builder
            .inherit_from_trusted_targets(self.inherit_from_trusted_targets)
            .target_hash_algorithms(&[HashAlgorithm::Sha512]);

        // Each top-level package's meta.far blob becomes a TUF target entry.
        let mut package_meta_fars = HashMap::new();
        for (package_path, package_hash) in &self.named_packages {
            let meta_far_blob = self.staged_blobs.get(package_hash).unwrap();
            package_meta_fars.insert(package_path, meta_far_blob);
        }

        // Stage the metadata blobs.
        for (package_path, meta_far_blob) in package_meta_fars {
            let target_path = TargetPath::new(package_path.to_string())?;
            let mut custom = HashMap::new();

            // Record the package's merkle and size as custom target metadata.
            custom.insert("merkle".into(), serde_json::to_value(meta_far_blob.info.merkle)?);
            custom.insert("size".into(), serde_json::to_value(meta_far_blob.info.size)?);

            match meta_far_blob.delivery_blob_type {
                Some(_) => {
                    // The source file is a compressed delivery blob:
                    // decompress it before adding the target.
                    let delivery_blob = std::fs::read(&meta_far_blob.info.source_path)?;
                    let meta_far_blob =
                        delivery_blob::decompress(&delivery_blob).with_context(|| {
                            format!(
                                "decompressing delivery blob {}",
                                meta_far_blob.info.source_path
                            )
                        })?;

                    repo_builder = repo_builder
                        .add_target_with_custom(
                            target_path,
                            futures::io::Cursor::new(meta_far_blob),
                            custom,
                        )
                        .await?;
                }
                None => {
                    // Uncompressed blob: stream it straight from the file.
                    let f = File::open(&meta_far_blob.info.source_path)?;

                    repo_builder = repo_builder
                        .add_target_with_custom(
                            target_path,
                            futures::io::AllowStdIo::new(f),
                            custom,
                        )
                        .await?;
                }
            }
        }

        // Stage the targets metadata. If we're forcing a metadata refresh, force a new targets,
        // snapshot, and timestamp, even if nothing changed in the contents.
        let repo_builder = if self.refresh_metadata || self.refresh_non_root_metadata {
            repo_builder.stage_targets()?
        } else {
            repo_builder.stage_targets_if_necessary()?
        };

        let repo_builder = repo_builder
            .snapshot_includes_length(true)
            .snapshot_includes_hashes(&[HashAlgorithm::Sha512]);

        let repo_builder = if self.refresh_metadata || self.refresh_non_root_metadata {
            repo_builder.stage_snapshot()?
        } else {
            repo_builder.stage_snapshot_if_necessary()?
        };

        let repo_builder = repo_builder
            .timestamp_includes_length(true)
            .timestamp_includes_hashes(&[HashAlgorithm::Sha512]);

        let repo_builder = if self.refresh_metadata || self.refresh_non_root_metadata {
            repo_builder.stage_timestamp()?
        } else {
            repo_builder.stage_timestamp_if_necessary()?
        };

        repo_builder.commit().await.context("publishing metadata")?;

        // Stage the blobs. Stores run concurrently, bounded by the number of
        // available CPUs.
        let () = futures::stream::iter(&self.staged_blobs)
            .map(Ok)
            .try_for_each_concurrent(
                std::thread::available_parallelism()?.get(),
                |(blob_hash, blob)| match blob.delivery_blob_type {
                    Some(delivery_blob_type) => self.repo.store_delivery_blob(
                        blob_hash,
                        Utf8Path::new(&blob.info.source_path),
                        delivery_blob_type,
                    ),
                    None => self.repo.store_blob(
                        blob_hash,
                        blob.info.size,
                        Utf8Path::new(&blob.info.source_path),
                    ),
                },
            )
            .await?;

        Ok((self.deps, self.staged_blobs.into_values().map(|blob| blob.info).collect()))
    }
736}
737
738fn check_manifests_are_equivalent(
739    old_package: &ToBeStagedPackage,
740    new_package: &ToBeStagedPackage,
741) -> Result<()> {
742    // Check if the packages conflict.
743    if old_package.manifest().hash() == new_package.manifest().hash() {
744        return Ok(());
745    }
746
747    // Create a message that tries to explain why we have a conflict.
748    let old_manifest_path =
749        old_package.manifest_path.as_ref().map(|path| path.as_str()).unwrap_or("<generated>");
750    let new_manifest_path =
751        new_package.manifest_path.as_ref().map(|path| path.as_str()).unwrap_or("<generated>");
752
753    let mut msg = vec![format!(
754        "conflict between package manifests\
755        \n  manifest paths:\
756        \n  - {old_manifest_path}\
757        \n  - {new_manifest_path}\
758        \n  differences:",
759    )];
760
761    #[derive(PartialEq, Eq)]
762    enum BlobEntry {
763        Contents(Vec<u8>),
764        Blob(BlobInfo),
765    }
766
767    // Helper to read in all the package contents so we can compare entries.
768    fn manifest_contents(manifest: &PackageManifest) -> Result<BTreeMap<String, BlobEntry>> {
769        let mut entries = BTreeMap::new();
770
771        for blob in manifest.blobs() {
772            if blob.path == "meta/" {
773                let file = std::fs::File::open(&blob.source_path)
774                    .with_context(|| format!("reading {}", blob.path))?;
775                let mut far = fuchsia_archive::Utf8Reader::new(file)?;
776
777                let far_entries =
778                    far.list().map(|entry| entry.path().to_owned()).collect::<Vec<_>>();
779                for path in far_entries {
780                    let contents = far.read_file(&path)?;
781                    entries.insert(path, BlobEntry::Contents(contents));
782                }
783            }
784
785            entries.insert(blob.path.clone(), BlobEntry::Blob(blob.clone()));
786        }
787
788        Ok(entries)
789    }
790
791    // Compare the contents and report any differences.
792    if let (Ok(old_contents), Ok(mut new_contents)) =
793        (manifest_contents(old_package.manifest()), manifest_contents(new_package.manifest()))
794    {
795        for (path, old_entry) in old_contents {
796            if let Some(new_entry) = new_contents.remove(&path) {
797                match (old_entry, new_entry) {
798                    (BlobEntry::Blob(old_blob), BlobEntry::Blob(new_blob)) => {
799                        if old_blob.merkle != new_blob.merkle {
800                            msg.push(format!(
801                                "  - {}: different contents found in:\n    - {}\n    - {}",
802                                path, old_blob.source_path, new_blob.source_path
803                            ));
804                        }
805                    }
806                    (old_entry, new_entry) => {
807                        if old_entry != new_entry {
808                            msg.push(format!("  - {path}: different contents"));
809                        }
810                    }
811                }
812            } else {
813                msg.push(format!("  - {path}: missing from manifest {new_manifest_path}"));
814            }
815        }
816
817        for path in new_contents.into_keys() {
818            msg.push(format!("  - {path}: missing from manifest {old_manifest_path}"));
819        }
820    }
821
822    // Helper to read in all the subpackages so we can compare entries.
823    fn manifest_subpackages(
824        manifest: &PackageManifest,
825    ) -> Result<BTreeMap<String, SubpackageInfo>> {
826        let mut entries = BTreeMap::new();
827        for subpackage in manifest.subpackages() {
828            entries.insert(subpackage.name.clone(), subpackage.clone());
829        }
830        Ok(entries)
831    }
832
833    // Compare the subpackages and report any differences.
834    if let (Ok(old_subpackages), Ok(mut new_subpackages)) =
835        (manifest_subpackages(old_package.manifest()), manifest_subpackages(new_package.manifest()))
836    {
837        for (name, old_subpackage) in old_subpackages {
838            if let Some(new_subpackage) = new_subpackages.remove(&name) {
839                if old_subpackage.merkle != new_subpackage.merkle {
840                    msg.push(format!(
841                        "  - {}: different subpackages found in:\n    - {}\n    - {}",
842                        name, old_subpackage.manifest_path, new_subpackage.manifest_path
843                    ));
844                }
845            } else {
846                msg.push(format!(
847                    "  - {name}: subpackage missing from manifest {new_manifest_path}"
848                ));
849            }
850        }
851
852        for name in new_subpackages.into_keys() {
853            msg.push(format!("  - {name}: subpackage missing from manifest {old_manifest_path}"));
854        }
855    }
856
857    Err(anyhow!(msg.join("\n")))
858}
859
860#[cfg(not(target_os = "fuchsia"))]
861#[cfg(test)]
862mod tests {
863    use super::*;
864    use crate::repository::{FileSystemRepository, PmRepository};
865    use crate::test_utils;
866    use assert_matches::assert_matches;
867    use fuchsia_pkg::PackageBuilder;
868    use pretty_assertions::{assert_eq, assert_ne};
869    use std::io::Write as _;
870    use std::os::unix::fs::PermissionsExt as _;
871    use tuf::crypto::Ed25519PrivateKey;
872    use tuf::metadata::{Metadata as _, MetadataPath};
873    use walkdir::WalkDir;
874
875    pub(crate) fn read_dir(dir: &Utf8Path) -> BTreeMap<String, Vec<u8>> {
876        let mut entries = BTreeMap::new();
877        for entry in WalkDir::new(dir) {
878            let entry = entry.unwrap();
879            if entry.metadata().unwrap().is_file() {
880                let path = entry.path().strip_prefix(dir).unwrap().to_str().unwrap().to_string();
881                let contents = std::fs::read(entry.path()).unwrap();
882
883                entries.insert(path, contents);
884            }
885        }
886
887        entries
888    }
889
890    #[fuchsia_async::run_singlethreaded(test)]
891    async fn test_create() {
892        let tmp = tempfile::tempdir().unwrap();
893        let dir = Utf8Path::from_path(tmp.path()).unwrap();
894
895        let repo = PmRepository::new(dir.to_path_buf());
896        let repo_keys = test_utils::make_repo_keys();
897
898        RepoBuilder::create(&repo, &repo_keys).commit().await.unwrap();
899
900        // Make sure we can update a client from this metadata.
901        let mut repo_client = RepoClient::from_trusted_remote(repo).await.unwrap();
902        assert_matches!(repo_client.update().await, Ok(true));
903    }
904
    // Publishes package1 from a manifest into a fresh repo, checks the
    // resulting blobs and metadata versions, then publishes package2 from a
    // package archive on top of the existing metadata and checks that all the
    // metadata rolled forward.
    #[fuchsia_async::run_singlethreaded(test)]
    async fn test_create_and_update_repo() {
        let tmp = tempfile::tempdir().unwrap();
        let dir = Utf8Path::from_path(tmp.path()).unwrap();

        let metadata_repo_path = dir.join("metadata");
        let blob_repo_path = dir.join("blobs");
        let repo = FileSystemRepository::new(metadata_repo_path, blob_repo_path.clone());
        let repo_keys = test_utils::make_repo_keys();

        // Build package1 on disk and write out its package manifest.
        let pkg1_dir = dir.join("package1");
        let (pkg1_meta_far_path, pkg1_manifest) =
            test_utils::make_package_manifest("package1", pkg1_dir.as_std_path(), Vec::new());
        let pkg1_manifest_path = pkg1_dir.join("package1.manifest");
        serde_json::to_writer(std::fs::File::create(&pkg1_manifest_path).unwrap(), &pkg1_manifest)
            .unwrap();
        let pkg1_meta_far_contents = std::fs::read(&pkg1_meta_far_path).unwrap();

        // Publish package1 into a brand-new repository.
        RepoBuilder::create(&repo, &repo_keys)
            .add_package(pkg1_manifest_path)
            .await
            .unwrap()
            .commit()
            .await
            .unwrap();

        // Make sure we wrote all the blobs from package1. Blobs are written as
        // type-1 delivery blobs under the `1/` subdirectory.
        assert_eq!(
            read_dir(&blob_repo_path),
            [
                (test_utils::PKG1_HASH, pkg1_meta_far_contents.clone()),
                (test_utils::PKG1_BIN_HASH, b"binary package1".to_vec()),
                (test_utils::PKG1_LIB_HASH, b"lib package1".to_vec()),
            ]
            .into_iter()
            .map(|(hash, content)| (
                format!("1/{hash}"),
                delivery_blob::generate(delivery_blob::DeliveryBlobType::Type1, &content)
            ),)
            .collect()
        );

        // Make sure we can update a client from this metadata.
        let mut repo_client = RepoClient::from_trusted_remote(repo).await.unwrap();
        assert_matches!(repo_client.update().await, Ok(true));

        assert_eq!(repo_client.database().trusted_root().version(), 1);
        assert_eq!(repo_client.database().trusted_targets().map(|m| m.version()), Some(1));
        assert_eq!(repo_client.database().trusted_snapshot().map(|m| m.version()), Some(1));
        assert_eq!(repo_client.database().trusted_timestamp().map(|m| m.version()), Some(1));

        // Create the next version of the metadata and add a new package to it.
        let pkg2_dir = dir.join("package2");
        let (pkg2_meta_far_path, pkg2_manifest) =
            test_utils::make_package_manifest("package2", pkg2_dir.as_std_path(), Vec::new());
        let pkg2_manifest_path = pkg2_dir.join("package2.manifest");
        serde_json::to_writer(std::fs::File::create(&pkg2_manifest_path).unwrap(), &pkg2_manifest)
            .unwrap();
        let pkg2_meta_far_contents = std::fs::read(&pkg2_meta_far_path).unwrap();

        // Package up package2 as a package archive so that this publish
        // exercises `add_package_archive` rather than `add_package`.
        let archive_outdir = TempDir::new().unwrap();

        let archive_path = archive_outdir.path().join("p2.far");
        let archive_file = fs::File::create(archive_path.clone()).unwrap();
        pkg2_manifest.archive(&pkg2_dir, &archive_file).await.unwrap();

        RepoBuilder::from_client(&repo_client, &repo_keys)
            .add_package_archive(Utf8PathBuf::from_path_buf(archive_path).unwrap())
            .await
            .unwrap()
            .commit()
            .await
            .unwrap();

        // Make sure we wrote all the blobs from package1 and package2.
        assert_eq!(
            read_dir(&blob_repo_path),
            [
                (test_utils::PKG1_HASH, pkg1_meta_far_contents.clone()),
                (test_utils::PKG1_BIN_HASH, b"binary package1".to_vec()),
                (test_utils::PKG1_LIB_HASH, b"lib package1".to_vec()),
                (test_utils::PKG2_HASH, pkg2_meta_far_contents.clone()),
                (test_utils::PKG2_BIN_HASH, b"binary package2".to_vec()),
                (test_utils::PKG2_LIB_HASH, b"lib package2".to_vec()),
            ]
            .into_iter()
            .map(|(hash, content)| (
                format!("1/{hash}"),
                delivery_blob::generate(delivery_blob::DeliveryBlobType::Type1, &content)
            ))
            .collect()
        );

        // Make sure we can resolve the new metadata. Root stays at v1; the
        // other roles should have been bumped to v2 by the second commit.
        assert_matches!(repo_client.update().await, Ok(true));
        assert_eq!(repo_client.database().trusted_root().version(), 1);
        assert_eq!(repo_client.database().trusted_targets().map(|m| m.version()), Some(2));
        assert_eq!(repo_client.database().trusted_snapshot().map(|m| m.version()), Some(2));
        assert_eq!(repo_client.database().trusted_timestamp().map(|m| m.version()), Some(2));

        // Make sure the timestamp and snapshot metadata was generated with the snapshot and targets
        // length and hashes.
        let snapshot_description = repo_client.database().trusted_timestamp().unwrap().snapshot();
        assert!(snapshot_description.length().is_some());
        assert!(!snapshot_description.hashes().is_empty());

        let trusted_snapshot = repo_client.database().trusted_snapshot().unwrap();
        let targets_description = trusted_snapshot.meta().get(&MetadataPath::targets()).unwrap();
        assert!(targets_description.length().is_some());
        assert!(!targets_description.hashes().is_empty());
    }
1016
    // Verifies that subpackages referenced by published packages are staged
    // automatically, that a duplicate of an already-staged package does not
    // cause its files to be read again, and that only directly-added packages
    // get TUF target entries.
    #[fuchsia_async::run_singlethreaded(test)]
    async fn test_create_and_update_repo_with_subpackages() {
        let tmp = tempfile::tempdir().unwrap();
        let dir = Utf8Path::from_path(tmp.path()).unwrap();

        let metadata_repo_path = dir.join("metadata");
        let blob_repo_path = dir.join("blobs");
        let repo = FileSystemRepository::new(metadata_repo_path, blob_repo_path.clone());
        let repo_keys = test_utils::make_repo_keys();

        const ANONYMOUS_SUBPACKAGE: &str = "anonymous_subpackage";
        const NAMED_SUBPACKAGE: &str = "named_subpackage";
        const NAMED_PACKAGE: &str = "named_package";
        const SUPERPACKAGE: &str = "superpackage";

        // Create an anonymous subpackage (a subpackage that is not directly
        // added to the RepoBuilder, but is added indirectly because it is
        // referenced as a subpackage of another package added to the repo).
        let anonsubpkg_dir = dir.join(ANONYMOUS_SUBPACKAGE);
        let (anonsubpkg_meta_far_path, anonsubpkg_manifest) = test_utils::make_package_manifest(
            ANONYMOUS_SUBPACKAGE,
            anonsubpkg_dir.as_std_path(),
            Vec::new(),
        );
        let anonsubpkg_manifest_path = anonsubpkg_dir.join("anonymous_subpackage.manifest");
        serde_json::to_writer(
            std::fs::File::create(&anonsubpkg_manifest_path).unwrap(),
            &anonsubpkg_manifest,
        )
        .unwrap();
        let anonsubpkg_meta_far_contents = std::fs::read(&anonsubpkg_meta_far_path).unwrap();

        // Create a named package (named_subpackage), which will also be a
        // subpackage of "superpackage". This named_subpackage will include the
        // anonymous_subpackage.
        let namedsubpkg_dir = dir.join(NAMED_SUBPACKAGE);
        let (namedsubpkg_meta_far_path, namedsubpkg_manifest) = test_utils::make_package_manifest(
            NAMED_SUBPACKAGE,
            namedsubpkg_dir.as_std_path(),
            vec![(
                "anon_subpackage_of_namedsubpkg".parse().unwrap(),
                anonsubpkg_manifest.hash(),
                anonsubpkg_manifest_path.clone().into(),
            )],
        );
        let namedsubpkg_manifest_path = namedsubpkg_dir.join("named_subpackage.manifest");
        serde_json::to_writer(
            std::fs::File::create(&namedsubpkg_manifest_path).unwrap(),
            &namedsubpkg_manifest,
        )
        .unwrap();
        let namedsubpkg_meta_far_contents = std::fs::read(&namedsubpkg_meta_far_path).unwrap();

        // Make a package that's a duplicate of `named_subpackage` but with different files. This
        // will be added after `named_subpackage`, so we shouldn't try to read any of these files.
        let namedpkg_dir = dir.join(NAMED_PACKAGE);
        let (_, namedpkg_manifest) = test_utils::make_package_manifest(
            NAMED_SUBPACKAGE,
            namedpkg_dir.as_std_path(),
            vec![(
                "anon_subpackage_of_namedsubpkg".parse().unwrap(),
                anonsubpkg_manifest.hash(),
                anonsubpkg_manifest_path.clone().into(),
            )],
        );
        let namedpkg_manifest_path = namedpkg_dir.join("named.manifest");
        serde_json::to_writer(
            std::fs::File::create(&namedpkg_manifest_path).unwrap(),
            &namedpkg_manifest,
        )
        .unwrap();

        // Create a named package ("superpackage"), which will also be a superpackage
        // of both named_subpackage and anonymous_subpackage. Note that
        // named_subpackage is ALSO a superpackage of anonymous_subpackage, so
        // anonymous_subpackage is referenced twice. It will only exist once in
        // the repo.
        let superpkg_dir = dir.join(SUPERPACKAGE);
        let (superpkg_meta_far_path, superpkg_manifest) = test_utils::make_package_manifest(
            SUPERPACKAGE,
            superpkg_dir.as_std_path(),
            vec![
                (
                    NAMED_SUBPACKAGE.parse().unwrap(),
                    namedsubpkg_manifest.hash(),
                    namedsubpkg_manifest_path.clone().into(),
                ),
                (
                    "anon_subpackage_of_superpkg".parse().unwrap(),
                    anonsubpkg_manifest.hash(),
                    anonsubpkg_manifest_path.clone().into(),
                ),
            ],
        );
        let superpkg_manifest_path = superpkg_dir.join("superpackage.manifest");
        serde_json::to_writer(
            std::fs::File::create(&superpkg_manifest_path).unwrap(),
            &superpkg_manifest,
        )
        .unwrap();
        let superpkg_meta_far_contents = std::fs::read(&superpkg_meta_far_path).unwrap();

        // Add the two named packages. The anonymous subpackage will be added
        // automatically.
        let (actual_deps, _) = RepoBuilder::create(&repo, &repo_keys)
            .add_package(superpkg_manifest_path.clone())
            .await
            .unwrap()
            .add_package(namedsubpkg_manifest_path.clone())
            .await
            .unwrap()
            .add_package(namedpkg_manifest_path.clone())
            .await
            .unwrap()
            .commit()
            .await
            .unwrap();

        // The returned deps should cover every manifest and blob file that was
        // actually read while staging.
        let mut expected_deps = BTreeSet::new();
        expected_deps.insert(anonsubpkg_manifest_path);
        expected_deps.extend(
            anonsubpkg_manifest.blobs().iter().map(|blob| Utf8PathBuf::from(&blob.source_path)),
        );
        expected_deps.insert(namedsubpkg_manifest_path);
        expected_deps.extend(
            namedsubpkg_manifest.blobs().iter().map(|blob| Utf8PathBuf::from(&blob.source_path)),
        );
        expected_deps.insert(superpkg_manifest_path);
        expected_deps.extend(
            superpkg_manifest.blobs().iter().map(|blob| Utf8PathBuf::from(&blob.source_path)),
        );
        // We should only read from the `named_package` manifest, but none of the files that are in
        // the manifest.
        expected_deps.insert(namedpkg_manifest_path);

        assert_eq!(actual_deps.into_iter().collect::<BTreeSet<_>>(), expected_deps);

        let repo_blobs = read_dir(&blob_repo_path);

        // Each blob should appear exactly once, even though the anonymous
        // subpackage was referenced from two places.
        assert_eq!(
            repo_blobs.keys().map(|k| k.to_owned()).collect::<BTreeSet<String>>(),
            [
                test_utils::ANONSUBPKG_HASH,
                test_utils::ANONSUBPKG_BIN_HASH,
                test_utils::ANONSUBPKG_LIB_HASH,
                test_utils::NAMEDSUBPKG_HASH,
                test_utils::NAMEDSUBPKG_BIN_HASH,
                test_utils::NAMEDSUBPKG_LIB_HASH,
                test_utils::SUPERPKG_HASH,
                test_utils::SUPERPKG_BIN_HASH,
                test_utils::SUPERPKG_LIB_HASH,
            ]
            .into_iter()
            .map(|hash| format!("1/{hash}"))
            .collect()
        );

        // Make sure we wrote all the blobs from all three staged packages.
        assert_eq!(
            read_dir(&blob_repo_path),
            [
                (test_utils::ANONSUBPKG_HASH, anonsubpkg_meta_far_contents.clone()),
                (test_utils::ANONSUBPKG_BIN_HASH, b"binary anonymous_subpackage".to_vec()),
                (test_utils::ANONSUBPKG_LIB_HASH, b"lib anonymous_subpackage".to_vec()),
                (test_utils::NAMEDSUBPKG_HASH, namedsubpkg_meta_far_contents.clone()),
                (test_utils::NAMEDSUBPKG_BIN_HASH, b"binary named_subpackage".to_vec()),
                (test_utils::NAMEDSUBPKG_LIB_HASH, b"lib named_subpackage".to_vec()),
                (test_utils::SUPERPKG_HASH, superpkg_meta_far_contents.clone()),
                (test_utils::SUPERPKG_BIN_HASH, b"binary superpackage".to_vec()),
                (test_utils::SUPERPKG_LIB_HASH, b"lib superpackage".to_vec()),
            ]
            .into_iter()
            .map(|(hash, content)| (
                format!("1/{hash}"),
                delivery_blob::generate(delivery_blob::DeliveryBlobType::Type1, &content)
            ))
            .collect()
        );

        // Make sure we can update a client from this metadata.
        let mut repo_client = RepoClient::from_trusted_remote(repo).await.unwrap();
        assert_matches!(repo_client.update().await, Ok(true));

        assert_eq!(repo_client.database().trusted_root().version(), 1);
        assert_eq!(repo_client.database().trusted_targets().map(|m| m.version()), Some(1));
        assert_eq!(repo_client.database().trusted_snapshot().map(|m| m.version()), Some(1));
        assert_eq!(repo_client.database().trusted_timestamp().map(|m| m.version()), Some(1));

        // Make sure we have targets for the named packages only.
        let trusted_targets = repo_client.database().trusted_targets().unwrap();
        assert!(
            trusted_targets
                .targets()
                .get(&TargetPath::new(format!("{SUPERPACKAGE}/0")).unwrap())
                .is_some()
        );
        assert!(
            trusted_targets
                .targets()
                .get(&TargetPath::new(format!("{NAMED_SUBPACKAGE}/0")).unwrap())
                .is_some()
        );
        assert!(
            trusted_targets
                .targets()
                .get(&TargetPath::new(format!("{ANONYMOUS_SUBPACKAGE}/0")).unwrap())
                .is_none()
        );
    }
1226
1227    #[fuchsia_async::run_singlethreaded(test)]
1228    async fn test_error_if_package_manifest_is_missing() {
1229        let tmp = tempfile::tempdir().unwrap();
1230        let dir = Utf8Path::from_path(tmp.path()).unwrap();
1231
1232        // Add the superpackage. This should fail because we haven't set `ignore_missing_files`.
1233        let metadata_repo_path = dir.join("metadata");
1234        let blob_repo_path = dir.join("blobs");
1235        let repo = FileSystemRepository::new(metadata_repo_path, blob_repo_path.clone());
1236        let repo_keys = test_utils::make_repo_keys();
1237
1238        // Try to stage the superpackage, which should error out because the
1239        // subpackage doesn't exist.
1240        assert_matches!(
1241            RepoBuilder::create(&repo, &repo_keys).add_package(dir.join("does-not-exist")).await,
1242            Err(_)
1243        );
1244    }
1245
1246    #[fuchsia_async::run_singlethreaded(test)]
1247    async fn test_do_not_stage_blobs_if_ignore_missing_files_and_package_files_are_missing() {
1248        let tmp = tempfile::tempdir().unwrap();
1249        let dir = Utf8Path::from_path(tmp.path()).unwrap();
1250
1251        let (_, pkg_manifest) =
1252            test_utils::make_package_manifest("package", dir.as_std_path(), vec![]);
1253
1254        // Delete a subpackage file.
1255        std::fs::remove_file(dir.join("package").join("meta.far")).unwrap();
1256
1257        // Commit the supackage to the repository. This should succeed because
1258        // `ignore_missing_files` is true.
1259        let metadata_repo_path = dir.join("metadata");
1260        let blob_repo_path = dir.join("blobs");
1261        let repo = FileSystemRepository::new(metadata_repo_path, blob_repo_path.clone());
1262        let repo_keys = test_utils::make_repo_keys();
1263
1264        let (actual_deps, committed_blobs) = RepoBuilder::create(&repo, &repo_keys)
1265            .ignore_missing_packages(true)
1266            .add_package_manifest(None, pkg_manifest.clone())
1267            .await
1268            .unwrap()
1269            .commit()
1270            .await
1271            .unwrap();
1272
1273        // However we shouldn't have tried to write anything to the repository.
1274        assert_eq!(actual_deps, HashSet::new());
1275        assert_eq!(committed_blobs, BTreeSet::new());
1276    }
1277
1278    #[fuchsia_async::run_singlethreaded(test)]
1279    async fn test_error_if_subpackage_manifest_is_missing() {
1280        let tmp = tempfile::tempdir().unwrap();
1281        let dir = Utf8Path::from_path(tmp.path()).unwrap();
1282
1283        // Make a subpackage.
1284        let subpkg_dir = dir.join("subpackage");
1285        let (_, subpkg_manifest) =
1286            test_utils::make_package_manifest("subpackage", subpkg_dir.as_std_path(), Vec::new());
1287
1288        let subpkg_manifest_path = subpkg_dir.join("subpackage.manifest");
1289        serde_json::to_writer(
1290            std::fs::File::create(&subpkg_manifest_path).unwrap(),
1291            &subpkg_manifest,
1292        )
1293        .unwrap();
1294
1295        // Make a superpackage that uses the subpackage.
1296        let superpkg_dir = dir.join("superpackage");
1297        let (_, superpkg_manifest) = test_utils::make_package_manifest(
1298            "superpackage",
1299            superpkg_dir.as_std_path(),
1300            vec![(
1301                "subpackage".parse().unwrap(),
1302                subpkg_manifest.hash(),
1303                subpkg_manifest_path.clone().into(),
1304            )],
1305        );
1306
1307        // Delete the subpackage manifest.
1308        std::fs::remove_file(&subpkg_manifest_path).unwrap();
1309
1310        // Add the superpackage. This should fail because we haven't set `ignore_missing_files`.
1311        let metadata_repo_path = dir.join("metadata");
1312        let blob_repo_path = dir.join("blobs");
1313        let repo = FileSystemRepository::new(metadata_repo_path, blob_repo_path.clone());
1314        let repo_keys = test_utils::make_repo_keys();
1315
1316        // Try to stage the superpackage, which should error out because the
1317        // subpackage doesn't exist.
1318        assert_matches!(
1319            RepoBuilder::create(&repo, &repo_keys)
1320                .add_package_manifest(None, superpkg_manifest)
1321                .await,
1322            Err(_)
1323        );
1324    }
1325
    // Missing subpackage *manifest* + `ignore_missing_packages(true)`: commit
    // succeeds but nothing is staged.
    #[fuchsia_async::run_singlethreaded(test)]
    async fn test_do_not_stage_blobs_if_ignore_missing_files_and_subpackage_manifest_is_missing() {
        let tmp = tempfile::tempdir().unwrap();
        let dir = Utf8Path::from_path(tmp.path()).unwrap();

        // Create the subpackage.
        let subpkg_dir = dir.join("subpackage");
        let (_, subpkg_manifest) =
            test_utils::make_package_manifest("subpackage", subpkg_dir.as_std_path(), Vec::new());

        let subpkg_manifest_path = subpkg_dir.join("subpackage.manifest");
        serde_json::to_writer(
            std::fs::File::create(&subpkg_manifest_path).unwrap(),
            &subpkg_manifest,
        )
        .unwrap();

        // Create the superpackage that uses the subpackage.
        let superpkg_dir = dir.join("superpackage");
        let (_, superpkg_manifest) = test_utils::make_package_manifest(
            "superpackage",
            superpkg_dir.as_std_path(),
            vec![(
                "subpackage".parse().unwrap(),
                subpkg_manifest.hash(),
                subpkg_manifest_path.clone().into(),
            )],
        );

        // Delete the subpackage manifest.
        std::fs::remove_file(&subpkg_manifest_path).unwrap();

        // Commit the superpackage to the repository. This should succeed
        // because `ignore_missing_packages` is true.
        let metadata_repo_path = dir.join("metadata");
        let blob_repo_path = dir.join("blobs");
        let repo = FileSystemRepository::new(metadata_repo_path, blob_repo_path.clone());
        let repo_keys = test_utils::make_repo_keys();

        let (actual_deps, committed_blobs) = RepoBuilder::create(&repo, &repo_keys)
            .ignore_missing_packages(true)
            .add_package_manifest(None, superpkg_manifest.clone())
            .await
            .unwrap()
            .commit()
            .await
            .unwrap();

        // However we shouldn't have tried to write anything to the repository.
        assert_eq!(actual_deps, HashSet::new());
        assert_eq!(committed_blobs, BTreeSet::new());
    }
1378
    // Missing subpackage *blob file* + `ignore_missing_packages(true)`: commit
    // succeeds but nothing is staged.
    #[fuchsia_async::run_singlethreaded(test)]
    async fn test_do_not_stage_blobs_if_ignore_missing_files_and_subpackage_files_are_missing() {
        let tmp = tempfile::tempdir().unwrap();
        let dir = Utf8Path::from_path(tmp.path()).unwrap();

        // Create the subpackage.
        let subpkg_dir = dir.join("subpackage");
        let (_, subpkg_manifest) =
            test_utils::make_package_manifest("subpackage", subpkg_dir.as_std_path(), Vec::new());

        let subpkg_manifest_path = subpkg_dir.join("subpackage.manifest");
        serde_json::to_writer(
            std::fs::File::create(&subpkg_manifest_path).unwrap(),
            &subpkg_manifest,
        )
        .unwrap();

        // Create the superpackage that uses the subpackage.
        let superpkg_dir = dir.join("superpackage");
        let (_, superpkg_manifest) = test_utils::make_package_manifest(
            "superpackage",
            superpkg_dir.as_std_path(),
            vec![(
                "subpackage".parse().unwrap(),
                subpkg_manifest.hash(),
                subpkg_manifest_path.clone().into(),
            )],
        );

        // Delete one of the subpackage's blobs (its meta.far).
        std::fs::remove_file(subpkg_dir.join("subpackage").join("meta.far")).unwrap();

        // Commit the superpackage to the repository. This should succeed
        // because `ignore_missing_packages` is true.
        let metadata_repo_path = dir.join("metadata");
        let blob_repo_path = dir.join("blobs");
        let repo = FileSystemRepository::new(metadata_repo_path, blob_repo_path.clone());
        let repo_keys = test_utils::make_repo_keys();

        let (actual_deps, committed_blobs) = RepoBuilder::create(&repo, &repo_keys)
            .ignore_missing_packages(true)
            .add_package_manifest(None, superpkg_manifest.clone())
            .await
            .unwrap()
            .commit()
            .await
            .unwrap();

        // However we shouldn't have tried to write anything to the repository.
        assert_eq!(actual_deps, HashSet::new());
        assert_eq!(committed_blobs, BTreeSet::new());
    }
1431
    // Refreshing metadata when every role's private key is available should
    // regenerate all four roles while preserving the targets, delegations, and
    // the non-targets snapshot entries.
    #[fuchsia_async::run_singlethreaded(test)]
    async fn test_refresh_metadata_with_all_keys() {
        let tmp = tempfile::tempdir().unwrap();
        let dir = Utf8Path::from_path(tmp.path()).unwrap();

        // Load up the test metadata, which was created some time ago, and has a different
        // expiration date.
        let repo = test_utils::make_pm_repository(dir).await;

        // Download the older metadata before we refresh it.
        let mut repo_client = RepoClient::from_trusted_remote(&repo).await.unwrap();
        repo_client.update().await.unwrap();

        let root1 = (*repo_client.database().trusted_root()).clone();
        let targets1 = repo_client.database().trusted_targets().cloned().unwrap();
        let snapshot1 = repo_client.database().trusted_snapshot().cloned().unwrap();
        let timestamp1 = repo_client.database().trusted_timestamp().cloned().unwrap();

        // Update the metadata expiration.
        let repo_keys = RepoKeys::from_dir(&dir.join("keys").into_std_path_buf()).unwrap();
        RepoBuilder::from_database(repo_client.remote_repo(), &repo_keys, repo_client.database())
            .refresh_metadata(true)
            .commit()
            .await
            .unwrap();

        // Finally, make sure the metadata has changed.
        assert_matches!(repo_client.update().await, Ok(true));

        let root2 = (*repo_client.database().trusted_root()).clone();
        let targets2 = repo_client.database().trusted_targets().cloned().unwrap();
        let snapshot2 = repo_client.database().trusted_snapshot().cloned().unwrap();
        let timestamp2 = repo_client.database().trusted_timestamp().cloned().unwrap();

        // Make sure we generated new metadata.
        assert_ne!(root1, root2);
        assert_ne!(targets1, targets2);
        assert_ne!(snapshot1, snapshot2);
        assert_ne!(timestamp1, timestamp2);

        // We should have kept our old snapshot entries (except the target should have changed).
        assert_eq!(
            snapshot1
                .meta()
                .iter()
                .filter(|(k, _)| **k != MetadataPath::targets())
                .collect::<HashMap<_, _>>(),
            snapshot2
                .meta()
                .iter()
                .filter(|(k, _)| **k != MetadataPath::targets())
                .collect::<HashMap<_, _>>(),
        );

        // We should have kept our targets and delegations.
        assert_eq!(targets1.targets(), targets2.targets());
        assert_eq!(targets1.delegations(), targets2.delegations());
    }
1490
1491    #[fuchsia_async::run_singlethreaded(test)]
1492    async fn test_refresh_metadata_with_some_keys() {
1493        let tmp = tempfile::tempdir().unwrap();
1494        let dir = Utf8Path::from_path(tmp.path()).unwrap();
1495
1496        // Load the repo.
1497        let repo = test_utils::make_pm_repository(dir).await;
1498
1499        // Download the older metadata before we refresh it.
1500        let mut repo_client = RepoClient::from_trusted_remote(&repo).await.unwrap();
1501        repo_client.update().await.unwrap();
1502
1503        let root1 = (*repo_client.database().trusted_root()).clone();
1504        let targets1 = repo_client.database().trusted_targets().cloned().unwrap();
1505        let snapshot1 = repo_client.database().trusted_snapshot().cloned().unwrap();
1506        let timestamp1 = repo_client.database().trusted_timestamp().cloned().unwrap();
1507
1508        // Load the repo, but delete the root private key file.
1509        let keys_dir = dir.join("keys");
1510        std::fs::remove_file(keys_dir.join("root.json")).unwrap();
1511
1512        // Update the metadata expiration.
1513        let repo_keys = RepoKeys::from_dir(&dir.join("keys").into_std_path_buf()).unwrap();
1514
1515        // Update the metadata expiration should succeed.
1516        RepoBuilder::from_database(repo_client.remote_repo(), &repo_keys, repo_client.database())
1517            .refresh_metadata(true)
1518            .commit()
1519            .await
1520            .unwrap();
1521
1522        // Make sure the metadata has changed.
1523        assert_matches!(repo_client.update().await, Ok(true));
1524
1525        let root2 = (*repo_client.database().trusted_root()).clone();
1526        let targets2 = repo_client.database().trusted_targets().cloned().unwrap();
1527        let snapshot2 = repo_client.database().trusted_snapshot().cloned().unwrap();
1528        let timestamp2 = repo_client.database().trusted_timestamp().cloned().unwrap();
1529
1530        // Make sure we generated new metadata, except for the root metadata.
1531        assert_eq!(root1, root2);
1532        assert_ne!(targets1, targets2);
1533        assert_ne!(snapshot1, snapshot2);
1534        assert_ne!(timestamp1, timestamp2);
1535
1536        // We should have kept our old snapshot entries (except the target should have changed).
1537        assert_eq!(
1538            snapshot1
1539                .meta()
1540                .iter()
1541                .filter(|(k, _)| **k != MetadataPath::targets())
1542                .collect::<HashMap<_, _>>(),
1543            snapshot2
1544                .meta()
1545                .iter()
1546                .filter(|(k, _)| **k != MetadataPath::targets())
1547                .collect::<HashMap<_, _>>(),
1548        );
1549
1550        // We should have kept our targets and delegations.
1551        assert_eq!(targets1.targets(), targets2.targets());
1552        assert_eq!(targets1.delegations(), targets2.delegations());
1553    }
1554
1555    #[fuchsia_async::run_singlethreaded(test)]
1556    async fn test_refresh_metadata_with_no_keys() {
1557        let tmp = tempfile::tempdir().unwrap();
1558        let dir = Utf8Path::from_path(tmp.path()).unwrap();
1559
1560        // Load the repo.
1561        let repo = test_utils::make_pm_repository(dir).await;
1562
1563        // Download the older metadata before we refresh it.
1564        let mut repo_client = RepoClient::from_trusted_remote(&repo).await.unwrap();
1565        repo_client.update().await.unwrap();
1566
1567        let root1 = (*repo_client.database().trusted_root()).clone();
1568        let targets1 = repo_client.database().trusted_targets().cloned().unwrap();
1569        let snapshot1 = repo_client.database().trusted_snapshot().cloned().unwrap();
1570        let timestamp1 = repo_client.database().trusted_timestamp().cloned().unwrap();
1571
1572        // Try to refresh the metadata with an empty key set, which should error out.
1573        let repo_keys = RepoKeys::builder().build();
1574        let res = RepoBuilder::from_database(
1575            repo_client.remote_repo(),
1576            &repo_keys,
1577            repo_client.database(),
1578        )
1579        .refresh_metadata(true)
1580        .commit()
1581        .await;
1582        assert_matches!(res, Err(_));
1583
1584        // Updating the client should return that there were no changes.
1585        assert_matches!(repo_client.update().await, Ok(false));
1586
1587        let root2 = (*repo_client.database().trusted_root()).clone();
1588        let targets2 = repo_client.database().trusted_targets().cloned().unwrap();
1589        let snapshot2 = repo_client.database().trusted_snapshot().cloned().unwrap();
1590        let timestamp2 = repo_client.database().trusted_timestamp().cloned().unwrap();
1591
1592        // We should not have changed the metadata.
1593        assert_eq!(root1, root2);
1594        assert_eq!(targets1, targets2);
1595        assert_eq!(snapshot1, snapshot2);
1596        assert_eq!(timestamp1, timestamp2);
1597    }
1598
1599    #[fuchsia_async::run_singlethreaded(test)]
1600    async fn test_refresh_metadata_with_root_metadata() {
1601        let tmp = tempfile::tempdir().unwrap();
1602        let root = Utf8Path::from_path(tmp.path()).unwrap();
1603
1604        // First create a repository.
1605        let full_repo_path = root.join("full");
1606        let full_metadata_repo_path = full_repo_path.join("repository");
1607        test_utils::make_pm_repo_dir(full_repo_path.as_std_path()).await;
1608
1609        // Then create a repository, which only has the root metadata in it.
1610        let test_repo_path = root.join("test");
1611        let test_metadata_repo_path = test_repo_path.join("repository");
1612        std::fs::create_dir_all(&test_metadata_repo_path).unwrap();
1613
1614        std::fs::copy(
1615            full_metadata_repo_path.join("root.json"),
1616            test_metadata_repo_path.join("1.root.json"),
1617        )
1618        .unwrap();
1619
1620        // Create a repo client and download the root metadata. Update should fail with missint TUF
1621        // metadata since we don't have any other metadata.
1622        let repo = PmRepository::new(test_repo_path);
1623        let mut repo_client = RepoClient::from_trusted_remote(&repo).await.unwrap();
1624        assert_matches!(
1625            repo_client.update().await,
1626            Err(crate::repository::Error::Tuf(tuf::Error::MetadataNotFound { path, .. }))
1627            if path == tuf::metadata::MetadataPath::timestamp()
1628        );
1629
1630        assert!(repo_client.database().trusted_targets().is_none());
1631        assert!(repo_client.database().trusted_snapshot().is_none());
1632        assert!(repo_client.database().trusted_timestamp().is_none());
1633
1634        // Update the metadata expiration. We'll use the keys from the full pm directory.
1635        let repo_keys =
1636            RepoKeys::from_dir(&full_repo_path.join("keys").into_std_path_buf()).unwrap();
1637        RepoBuilder::from_database(repo_client.remote_repo(), &repo_keys, repo_client.database())
1638            .refresh_metadata(true)
1639            .commit()
1640            .await
1641            .unwrap();
1642
1643        // Updating the client should succeed since we created the missing metadata.
1644        assert_matches!(repo_client.update().await, Ok(true));
1645
1646        assert!(repo_client.database().trusted_targets().is_some());
1647        assert!(repo_client.database().trusted_snapshot().is_some());
1648        assert!(repo_client.database().trusted_timestamp().is_some());
1649    }
1650
1651    #[fuchsia_async::run_singlethreaded(test)]
1652    async fn test_inherit_from_trusted_targets() {
1653        let tmp = tempfile::tempdir().unwrap();
1654        let root = Utf8Path::from_path(tmp.path()).unwrap();
1655        let repo_dir = root.join("repo");
1656
1657        // Load the repo, which already contains package1 and package2.
1658        let repo = test_utils::make_pm_repository(repo_dir).await;
1659        let mut repo_client = RepoClient::from_trusted_remote(&repo).await.unwrap();
1660        repo_client.update().await.unwrap();
1661
1662        // Publish package3 to the repository.
1663        let pkg3_dir = root.join("pkg3");
1664        let (_, pkg3_manifest) =
1665            test_utils::make_package_manifest("package3", pkg3_dir.as_std_path(), Vec::new());
1666        let pkg3_manifest_path = pkg3_dir.join("package3.manifest");
1667        serde_json::to_writer(std::fs::File::create(&pkg3_manifest_path).unwrap(), &pkg3_manifest)
1668            .unwrap();
1669
1670        let repo_keys = test_utils::make_repo_keys();
1671        RepoBuilder::from_database(repo_client.remote_repo(), &repo_keys, repo_client.database())
1672            .add_package(pkg3_manifest_path)
1673            .await
1674            .unwrap()
1675            .commit()
1676            .await
1677            .unwrap();
1678
1679        // Make sure we have metadata for package1, package2, and package3.
1680        assert_matches!(repo_client.update().await, Ok(true));
1681        let trusted_targets = repo_client.database().trusted_targets().unwrap();
1682        assert!(trusted_targets.targets().get("package1/0").is_some());
1683        assert!(trusted_targets.targets().get("package2/0").is_some());
1684        assert!(trusted_targets.targets().get("package3/0").is_some());
1685
1686        // Now do another commit, but this time not inheriting the old packages.
1687        let pkg4_dir = root.join("pkg4");
1688        let (_, pkg4_manifest) =
1689            test_utils::make_package_manifest("package4", pkg4_dir.as_std_path(), Vec::new());
1690        let pkg4_manifest_path = pkg4_dir.join("package4.manifest");
1691        serde_json::to_writer(std::fs::File::create(&pkg4_manifest_path).unwrap(), &pkg4_manifest)
1692            .unwrap();
1693
1694        RepoBuilder::from_database(repo_client.remote_repo(), &repo_keys, repo_client.database())
1695            .inherit_from_trusted_targets(false)
1696            .add_package(pkg4_manifest_path)
1697            .await
1698            .unwrap()
1699            .commit()
1700            .await
1701            .unwrap();
1702
1703        // We should only have metadata for package4.
1704        assert_matches!(repo_client.update().await, Ok(true));
1705        let trusted_targets = repo_client.database().trusted_targets().unwrap();
1706        assert!(trusted_targets.targets().get("package1/0").is_none());
1707        assert!(trusted_targets.targets().get("package2/0").is_none());
1708        assert!(trusted_targets.targets().get("package3/0").is_none());
1709        assert!(trusted_targets.targets().get("package4/0").is_some());
1710    }
1711
1712    fn generate_ed25519_private_key() -> Ed25519PrivateKey {
1713        Ed25519PrivateKey::from_pkcs8(&Ed25519PrivateKey::pkcs8().unwrap()).unwrap()
1714    }
1715
1716    #[fuchsia_async::run_singlethreaded(test)]
1717    async fn test_key_rotation() {
1718        let tmp = tempfile::tempdir().unwrap();
1719        let root = Utf8Path::from_path(tmp.path()).unwrap();
1720        let repo_dir = root.join("repo");
1721
1722        // First, make a repository.
1723        let repo = test_utils::make_pm_repository(repo_dir).await;
1724        let mut repo_client = RepoClient::from_trusted_remote(&repo).await.unwrap();
1725        repo_client.update().await.unwrap();
1726
1727        // Then make a new RepoKeys with unique keys.
1728        let repo_trusted_keys = RepoKeys::builder()
1729            .add_root_key(Box::new(generate_ed25519_private_key()))
1730            .add_targets_key(Box::new(generate_ed25519_private_key()))
1731            .add_snapshot_key(Box::new(generate_ed25519_private_key()))
1732            .add_timestamp_key(Box::new(generate_ed25519_private_key()))
1733            .build();
1734
1735        // Generate new metadata that trusts the new keys, but signs it with the old keys.
1736        let repo_signing_keys = repo.repo_keys().unwrap();
1737        RepoBuilder::from_database(
1738            repo_client.remote_repo(),
1739            &repo_trusted_keys,
1740            repo_client.database(),
1741        )
1742        .signing_repo_keys(&repo_signing_keys)
1743        .commit()
1744        .await
1745        .unwrap();
1746
1747        // Make sure we can update.
1748        assert_matches!(repo_client.update().await, Ok(true));
1749        assert_eq!(repo_client.database().trusted_root().version(), 2);
1750        assert_eq!(repo_client.database().trusted_snapshot().unwrap().version(), 2);
1751        assert_eq!(repo_client.database().trusted_targets().unwrap().version(), 2);
1752        assert_eq!(repo_client.database().trusted_timestamp().unwrap().version(), 2);
1753
1754        // Make sure we only trust the new keys.
1755        let trusted_root = repo_client.database().trusted_root();
1756        assert_eq!(
1757            trusted_root.root_keys().collect::<Vec<_>>(),
1758            repo_trusted_keys.root_keys().iter().map(|k| k.public()).collect::<Vec<_>>(),
1759        );
1760
1761        assert_eq!(
1762            trusted_root.targets_keys().collect::<Vec<_>>(),
1763            repo_trusted_keys.targets_keys().iter().map(|k| k.public()).collect::<Vec<_>>(),
1764        );
1765
1766        assert_eq!(
1767            trusted_root.snapshot_keys().collect::<Vec<_>>(),
1768            repo_trusted_keys.snapshot_keys().iter().map(|k| k.public()).collect::<Vec<_>>(),
1769        );
1770
1771        assert_eq!(
1772            trusted_root.timestamp_keys().collect::<Vec<_>>(),
1773            repo_trusted_keys.timestamp_keys().iter().map(|k| k.public()).collect::<Vec<_>>(),
1774        );
1775    }
1776
    /// Arbitrary ABI revision used when building test packages; the exact
    /// value is irrelevant to these tests.
    const FAKE_ABI_REVISION: version_history::AbiRevision =
        version_history::AbiRevision::from_u64(0x7171611eb2b7b74a);
1779
1780    #[fuchsia_async::run_singlethreaded(test)]
1781    async fn test_conflicting_package_manifests_errors_out() {
1782        let tmp = tempfile::tempdir().unwrap();
1783        let dir = Utf8Path::from_path(tmp.path()).unwrap();
1784
1785        let metadata_repo_path = dir.join("metadata");
1786        let blob_repo_path = dir.join("blobs");
1787        let repo = FileSystemRepository::new(metadata_repo_path, blob_repo_path);
1788        let repo_keys = test_utils::make_repo_keys();
1789
1790        let pkg1_dir = dir.join("package1");
1791        let (_, pkg1_manifest) =
1792            test_utils::make_package_manifest("package1", pkg1_dir.as_std_path(), Vec::new());
1793        let pkg1_manifest_path = pkg1_dir.join("package1.manifest");
1794        serde_json::to_writer(std::fs::File::create(&pkg1_manifest_path).unwrap(), &pkg1_manifest)
1795            .unwrap();
1796
1797        // Whoops, we created a package with the same package name but with different contents.
1798        let pkg2_dir = dir.join("package2");
1799        let pkg2_meta_far_path = pkg2_dir.join("meta.far");
1800        let pkg2_manifest = PackageBuilder::new("package1", FAKE_ABI_REVISION)
1801            .build(&pkg2_dir, &pkg2_meta_far_path)
1802            .unwrap();
1803        let pkg2_manifest_path = pkg2_dir.join("package2.manifest");
1804        serde_json::to_writer(std::fs::File::create(&pkg2_manifest_path).unwrap(), &pkg2_manifest)
1805            .unwrap();
1806
1807        assert!(
1808            RepoBuilder::create(&repo, &repo_keys)
1809                .add_package(pkg1_manifest_path)
1810                .await
1811                .unwrap()
1812                .add_package(pkg2_manifest_path)
1813                .await
1814                .is_err()
1815        );
1816    }
1817
1818    #[fuchsia_async::run_singlethreaded(test)]
1819    async fn test_stage_package_overwrites_corrupted_blob_in_blob_store() {
1820        let tmp = tempfile::tempdir().unwrap();
1821        let dir = Utf8Path::from_path(tmp.path()).unwrap();
1822
1823        let metadata_repo_path = dir.join("metadata");
1824        let blob_repo_path = dir.join("blobs");
1825        let repo = FileSystemRepository::new(metadata_repo_path, blob_repo_path.clone());
1826        let repo_keys = test_utils::make_repo_keys();
1827
1828        // Create 2 packages with a shared blob between them
1829        let contents = b"shared blob";
1830        let hash = fuchsia_merkle::root_from_slice(contents);
1831
1832        let pkg1_dir = dir.join("package1");
1833        let pkg1_meta_far_path = pkg1_dir.join("meta.far");
1834        let pkg1_manifest = {
1835            let mut builder = PackageBuilder::new("package1", FAKE_ABI_REVISION);
1836            builder.add_contents_as_blob("bin/shared", contents, &pkg1_dir).unwrap();
1837            builder.build(&pkg1_dir, &pkg1_meta_far_path).unwrap()
1838        };
1839        let pkg1_manifest_path = pkg1_dir.join("package1.manifest");
1840        serde_json::to_writer(std::fs::File::create(&pkg1_manifest_path).unwrap(), &pkg1_manifest)
1841            .unwrap();
1842
1843        let pkg2_dir = dir.join("package2");
1844        let pkg2_meta_far_path = pkg2_dir.join("meta.far");
1845        let pkg2_manifest = {
1846            let mut builder = PackageBuilder::new("package2", FAKE_ABI_REVISION);
1847            builder.add_contents_as_blob("bin/shared", contents, &pkg2_dir).unwrap();
1848            builder.build(&pkg2_dir, &pkg2_meta_far_path).unwrap()
1849        };
1850        let pkg2_manifest_path = pkg2_dir.join("package2.manifest");
1851        serde_json::to_writer(std::fs::File::create(&pkg2_manifest_path).unwrap(), &pkg2_manifest)
1852            .unwrap();
1853
1854        // Create the repo and publish the first package.
1855        RepoBuilder::create(&repo, &repo_keys)
1856            .add_package(pkg1_manifest_path)
1857            .await
1858            .unwrap()
1859            .commit()
1860            .await
1861            .unwrap();
1862
1863        // Corrupt the contents of the blob, and change it's size.
1864        let shared_blob_path = blob_repo_path.join(format!("1/{hash}"));
1865        let mut perms = fs::metadata(&shared_blob_path).unwrap().permissions();
1866        perms.set_mode(perms.mode() | 0o664);
1867        fs::set_permissions(&shared_blob_path, perms).unwrap();
1868        // It's no longer a delivery blob after this write
1869        fs::write(&shared_blob_path, b"corrupted contents").unwrap();
1870
1871        // Publish the second package, which should succeed and overwrite the corrupted blob.
1872        RepoBuilder::create(&repo, &repo_keys)
1873            .add_package(pkg2_manifest_path)
1874            .await
1875            .unwrap()
1876            .commit()
1877            .await
1878            .unwrap();
1879
1880        let delivery_blob = std::fs::read(&shared_blob_path).unwrap();
1881        let actual: Vec<u8> = delivery_blob::decompress(&delivery_blob).unwrap();
1882        assert_eq!(actual, contents);
1883    }
1884
    #[fuchsia_async::run_singlethreaded(test)]
    async fn test_stage_package_does_not_overwrite_corrupted_blob_in_blob_store_if_lengths_match() {
        let tmp = tempfile::tempdir().unwrap();
        let dir = Utf8Path::from_path(tmp.path()).unwrap();

        let metadata_repo_path = dir.join("metadata");
        let blob_repo_path = dir.join("blobs");
        let repo = FileSystemRepository::new(metadata_repo_path, blob_repo_path.clone());
        let repo_keys = test_utils::make_repo_keys();

        // Create 2 packages with a shared blob between them. `contents2` is
        // the same length as `contents` so the corrupted delivery blob below
        // ends up with an identical on-disk size.
        let contents = b"shared blob";
        let contents2 = b"corruptblob";
        let hash = fuchsia_merkle::root_from_slice(contents);

        let pkg1_dir = dir.join("package1");
        let pkg1_meta_far_path = pkg1_dir.join("meta.far");
        let pkg1_manifest = {
            let mut builder = PackageBuilder::new("package1", FAKE_ABI_REVISION);
            builder.add_contents_as_blob("bin/shared", contents, &pkg1_dir).unwrap();
            builder.build(&pkg1_dir, &pkg1_meta_far_path).unwrap()
        };
        let pkg1_manifest_path = pkg1_dir.join("package1.manifest");
        serde_json::to_writer(std::fs::File::create(&pkg1_manifest_path).unwrap(), &pkg1_manifest)
            .unwrap();

        let pkg2_dir = dir.join("package2");
        let pkg2_meta_far_path = pkg2_dir.join("meta.far");
        let pkg2_manifest = {
            let mut builder = PackageBuilder::new("package2", FAKE_ABI_REVISION);
            builder.add_contents_as_blob("bin/shared", contents, &pkg2_dir).unwrap();
            builder.build(&pkg2_dir, &pkg2_meta_far_path).unwrap()
        };
        let pkg2_manifest_path = pkg2_dir.join("package2.manifest");
        serde_json::to_writer(std::fs::File::create(&pkg2_manifest_path).unwrap(), &pkg2_manifest)
            .unwrap();

        // Create the repo and publish the first package.
        RepoBuilder::create(&repo, &repo_keys)
            .add_package(pkg1_manifest_path)
            .await
            .unwrap()
            .commit()
            .await
            .unwrap();

        // Corrupt the contents of the blob, but keep its length the same by
        // writing a valid delivery blob of equal-length contents.
        let shared_blob_path = blob_repo_path.join(format!("1/{hash}"));
        let mut perms = fs::metadata(&shared_blob_path).unwrap().permissions();
        perms.set_mode(perms.mode() | 0o664);
        fs::set_permissions(&shared_blob_path, perms).unwrap();
        fs::write(&shared_blob_path, delivery_blob::generate(DeliveryBlobType::Type1, contents2))
            .unwrap();

        // Publish the second package. The publish succeeds, but because the
        // existing blob's length matches the expected length, the blob is
        // NOT rewritten.
        RepoBuilder::create(&repo, &repo_keys)
            .add_package(pkg2_manifest_path)
            .await
            .unwrap()
            .commit()
            .await
            .unwrap();

        // The blob still contains the corrupted contents.
        let delivery_blob = std::fs::read(&shared_blob_path).unwrap();
        let actual: Vec<u8> = delivery_blob::decompress(&delivery_blob).unwrap();
        assert_eq!(actual, contents2);
    }
1953
1954    #[fuchsia_async::run_singlethreaded(test)]
1955    async fn test_conflicting_package_archives_errors_out() {
1956        let tmp = tempfile::tempdir().unwrap();
1957        let dir = Utf8Path::from_path(tmp.path()).unwrap();
1958
1959        let metadata_repo_path = dir.join("metadata");
1960        let blob_repo_path = dir.join("blobs");
1961        let repo = FileSystemRepository::new(metadata_repo_path, blob_repo_path);
1962        let repo_keys = test_utils::make_repo_keys();
1963
1964        let pkg1_dir = dir.join("package1");
1965        let (_, pkg1_manifest) =
1966            test_utils::make_package_manifest("package1", pkg1_dir.as_std_path(), Vec::new());
1967        let pkg1_manifest_path = pkg1_dir.join("package1.manifest");
1968        serde_json::to_writer(std::fs::File::create(&pkg1_manifest_path).unwrap(), &pkg1_manifest)
1969            .unwrap();
1970
1971        // Whoops, we created a package with the same package name but with different contents.
1972        let pkg2_dir = dir.join("package2");
1973        let pkg2_meta_far_path = pkg2_dir.join("meta.far");
1974        let pkg2_manifest = PackageBuilder::new("package1", FAKE_ABI_REVISION)
1975            .build(&pkg2_dir, &pkg2_meta_far_path)
1976            .unwrap();
1977        let pkg2_manifest_path = pkg2_dir.join("package2.manifest");
1978        serde_json::to_writer(std::fs::File::create(&pkg2_manifest_path).unwrap(), &pkg2_manifest)
1979            .unwrap();
1980
1981        let archive_outdir = TempDir::new().unwrap();
1982
1983        let archive_path1 = archive_outdir.path().join("p1.far");
1984        let archive_file1 = fs::File::create(archive_path1.clone()).unwrap();
1985        pkg1_manifest.archive(&pkg1_dir, &archive_file1).await.unwrap();
1986
1987        let archive_path2 = archive_outdir.path().join("p2.far");
1988        let archive_file2 = fs::File::create(archive_path2.clone()).unwrap();
1989        pkg2_manifest.archive(&pkg2_dir, &archive_file2).await.unwrap();
1990
1991        assert!(
1992            RepoBuilder::create(&repo, &repo_keys)
1993                .add_package_archive(Utf8PathBuf::from_path_buf(archive_path1).unwrap())
1994                .await
1995                .unwrap()
1996                .add_package_archive(Utf8PathBuf::from_path_buf(archive_path2).unwrap())
1997                .await
1998                .is_err()
1999        );
2000    }
2001
2002    #[fuchsia_async::run_singlethreaded(test)]
2003    async fn test_conflicting_package_archive_and_manifest_errors_out() {
2004        let tmp = tempfile::tempdir().unwrap();
2005        let dir = Utf8Path::from_path(tmp.path()).unwrap();
2006
2007        let metadata_repo_path = dir.join("metadata");
2008        let blob_repo_path = dir.join("blobs");
2009        let repo = FileSystemRepository::new(metadata_repo_path, blob_repo_path);
2010        let repo_keys = test_utils::make_repo_keys();
2011
2012        let pkg1_dir = dir.join("package1");
2013        let (_, pkg1_manifest) =
2014            test_utils::make_package_manifest("package1", pkg1_dir.as_std_path(), Vec::new());
2015        let pkg1_manifest_path = pkg1_dir.join("package1.manifest");
2016        serde_json::to_writer(std::fs::File::create(&pkg1_manifest_path).unwrap(), &pkg1_manifest)
2017            .unwrap();
2018
2019        // Whoops, we created a package with the same package name but with different contents.
2020        let pkg2_dir = dir.join("package2");
2021        let pkg2_meta_far_path = pkg2_dir.join("meta.far");
2022        let pkg2_manifest = PackageBuilder::new("package1", FAKE_ABI_REVISION)
2023            .build(&pkg2_dir, &pkg2_meta_far_path)
2024            .unwrap();
2025        let pkg2_manifest_path = pkg2_dir.join("package2.manifest");
2026        serde_json::to_writer(std::fs::File::create(&pkg2_manifest_path).unwrap(), &pkg2_manifest)
2027            .unwrap();
2028
2029        let archive_outdir = TempDir::new().unwrap();
2030
2031        let archive_path1 = archive_outdir.path().join("p1.far");
2032        let archive_file1 = fs::File::create(archive_path1.clone()).unwrap();
2033        pkg1_manifest.archive(&pkg1_dir, &archive_file1).await.unwrap();
2034
2035        assert!(
2036            RepoBuilder::create(&repo, &repo_keys)
2037                .add_package_archive(Utf8PathBuf::from_path_buf(archive_path1).unwrap())
2038                .await
2039                .unwrap()
2040                .add_package(pkg2_manifest_path)
2041                .await
2042                .is_err()
2043        );
2044    }
2045
2046    #[fuchsia_async::run_singlethreaded(test)]
2047    async fn test_blob_size_differs_from_manifest_errors_out() {
2048        let tmp = tempfile::tempdir().unwrap();
2049        let dir = Utf8Path::from_path(tmp.path()).unwrap();
2050
2051        let metadata_repo_path = dir.join("metadata");
2052        let blob_repo_path = dir.join("blobs");
2053        let repo = FileSystemRepository::new(metadata_repo_path, blob_repo_path);
2054        let repo_keys = test_utils::make_repo_keys();
2055
2056        let pkg1_dir = dir.join("package1");
2057        let (_pkg1_meta_far_path, pkg1_manifest) =
2058            test_utils::make_package_manifest("package1", pkg1_dir.as_std_path(), Vec::new());
2059        let pkg1_manifest_path = pkg1_dir.join("package1.manifest");
2060        serde_json::to_writer(std::fs::File::create(&pkg1_manifest_path).unwrap(), &pkg1_manifest)
2061            .unwrap();
2062
2063        // Oh no, the blob now disagrees with metadata in the manifest.
2064        let pkg1_blob_path = pkg1_dir.join("package1").join("bin").join("package1");
2065        let mut blob = fs::File::options().append(true).open(pkg1_blob_path).unwrap();
2066        blob.write_all(b"more bytes the manifest doesn't know about.").unwrap();
2067        blob.write_all(b"also, my hash is now wrong").unwrap();
2068        drop(blob);
2069
2070        let err = RepoBuilder::create(&repo, &repo_keys)
2071            .add_package_manifest(Some(pkg1_dir), pkg1_manifest)
2072            .await
2073            .unwrap_err();
2074        assert_matches!(err.downcast_ref::<BlobSizeMismatchError>(), Some(_));
2075    }
2076
2077    #[fuchsia_async::run_singlethreaded(test)]
2078    async fn test_rejects_distinct_merkles_referring_to_the_same_inode() {
2079        let tmp = tempfile::tempdir().unwrap();
2080        let dir = Utf8Path::from_path(tmp.path()).unwrap();
2081
2082        let metadata_repo_path = dir.join("metadata");
2083        let blob_repo_path = dir.join("blobs");
2084        let repo = FileSystemRepository::new(metadata_repo_path, blob_repo_path);
2085        let repo_keys = test_utils::make_repo_keys();
2086
2087        let pkg1_dir = dir.join("package1");
2088        let pkg1_meta_far_path = pkg1_dir.join("meta.far");
2089        let pkg1_manifest = {
2090            let mut builder = PackageBuilder::new("package1", FAKE_ABI_REVISION);
2091            builder.add_contents_as_blob("bin/blob1", b"blob1", &pkg1_dir).unwrap();
2092            builder.add_contents_as_blob("bin/blob2", b"blob2", &pkg1_dir).unwrap();
2093            builder.build(&pkg1_dir, &pkg1_meta_far_path).unwrap()
2094        };
2095        let pkg1_manifest_path = pkg1_dir.join("package1.manifest");
2096        serde_json::to_writer(std::fs::File::create(&pkg1_manifest_path).unwrap(), &pkg1_manifest)
2097            .unwrap();
2098
2099        // Make blob2 a hardlink of blob1, but the manifest has 2 distinct merkles for it
2100        let pkg1_blob1_path = pkg1_dir.join("bin").join("blob1");
2101        let pkg1_blob2_path = pkg1_dir.join("bin").join("blob2");
2102        fs::remove_file(&pkg1_blob2_path).unwrap();
2103        fs::hard_link(&pkg1_blob1_path, &pkg1_blob2_path).unwrap();
2104
2105        let err = RepoBuilder::create(&repo, &repo_keys)
2106            .add_package_manifest(Some(pkg1_dir), pkg1_manifest)
2107            .await
2108            .unwrap_err();
2109        assert_matches!(err.downcast_ref::<MerkleHardLinkMismatchError>(), Some(_));
2110    }
2111}