fuchsia_pkg_testing/repo.rs

// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Test tools for building and serving TUF repositories containing Fuchsia packages.
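//!
//! # Example
//!
//! A minimal sketch of the typical flow (assumes an async test context and a
//! `package: Package` built elsewhere with this crate's `PackageBuilder`):
//!
//! ```ignore
//! // Build a TUF repository containing one package, then inspect it.
//! let repo = RepositoryBuilder::new().add_package(package).build().await?;
//! let packages = repo.list_packages()?;
//! // `Repository::server()` can then be used to serve the repository over HTTP in tests.
//! ```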

use crate::package::Package;
use crate::serve::ServedRepositoryBuilder;
use anyhow::{Context as _, Error, format_err};
use delivery_blob::DeliveryBlobType;
use fidl_fuchsia_pkg_ext::{
    MirrorConfig, RepositoryConfig, RepositoryConfigBuilder, RepositoryKey,
};
use fuchsia_merkle::Hash;
use fuchsia_repo::repo_builder::RepoBuilder;
use fuchsia_repo::repo_keys::RepoKeys;
use fuchsia_repo::repository::PmRepository;
use fuchsia_url::RepositoryUrl;
use maybe_owned::MaybeOwned;
use serde::Deserialize;
use std::collections::{BTreeMap, BTreeSet};
use std::fs::{self, File};
use std::io::{self, Read};
use std::path::PathBuf;
use std::sync::Arc;
use tempfile::TempDir;
use walkdir::WalkDir;

/// A builder to simplify construction of TUF repositories containing Fuchsia packages.
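///
/// # Example
///
/// A minimal sketch of the builder options defined below (the template path is
/// hypothetical and `package` is assumed to come from this crate's `PackageBuilder`):
///
/// ```ignore
/// // Start from an existing TUF repository and choose the delivery blob type.
/// let repo = RepositoryBuilder::from_template_dir("path/to/template/repo")
///     .delivery_blob_type(DeliveryBlobType::Type1)
///     .add_package(&package)
///     .build()
///     .await?;
/// ```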
#[derive(Debug)]
pub struct RepositoryBuilder<'a> {
    packages: Vec<MaybeOwned<'a, Package>>,
    repodir: Option<PathBuf>,
    delivery_blob_type: DeliveryBlobType,
}

impl Default for RepositoryBuilder<'_> {
    fn default() -> Self {
        Self { packages: vec![], repodir: None, delivery_blob_type: DeliveryBlobType::Type1 }
    }
}

impl<'a> RepositoryBuilder<'a> {
    /// Creates a new `RepositoryBuilder`.
    pub fn new() -> Self {
        Self::default()
    }

    /// Creates a new `RepositoryBuilder` from a template TUF repository dir.
    pub fn from_template_dir(path: impl Into<PathBuf>) -> Self {
        Self { repodir: Some(path.into()), ..Self::default() }
    }

    /// Adds a package (or a reference to one) to the repository.
    pub fn add_package(mut self, package: impl Into<MaybeOwned<'a, Package>>) -> Self {
        self.packages.push(package.into());
        self
    }

    /// Sets the type of delivery blob; the blobs will be exposed at "/blobs/{type}/".
    pub fn delivery_blob_type(mut self, delivery_blob_type: DeliveryBlobType) -> Self {
        self.delivery_blob_type = delivery_blob_type;
        self
    }

    /// Builds the repository.
    pub async fn build(self) -> Result<Repository, Error> {
        let repodir = tempfile::tempdir().context("create /repo")?;

        // If configured to use a template repository directory, first copy it into the repo dir.
        let keys = if let Some(templatedir) = self.repodir {
            for entry in WalkDir::new(&templatedir) {
                let entry = entry?;
                if entry.path() == templatedir {
                    continue;
                }
                let relative_entry_path = entry.path().strip_prefix(&templatedir)?;
                let target_path = repodir.path().join(relative_entry_path);
                if entry.file_type().is_dir() {
                    fs::create_dir(target_path)?;
                } else {
                    fs::copy(entry.path(), target_path)?;
                }
            }

            RepoKeys::from_dir(repodir.path().join("keys").as_path()).unwrap()
        } else {
            // Otherwise, generate a new empty repo and keys.
            let keys = RepoKeys::generate(repodir.path()).unwrap();

            RepoBuilder::create(
                PmRepository::builder(repodir.path().to_owned().try_into()?)
                    .delivery_blob_type(self.delivery_blob_type)
                    .build(),
                &keys,
            )
            .commit()
            .await
            .unwrap();

            keys
        };

        // Open the repo for an update.
        let pm_repo = PmRepository::builder(repodir.path().to_owned().try_into()?)
            .delivery_blob_type(self.delivery_blob_type)
            .build();
        let client = {
            let local = tuf::repository::EphemeralRepository::<tuf::pouf::Pouf1>::new();

            let mut client = tuf::client::Client::with_trusted_root_keys(
                tuf::client::Config::default(),
                tuf::metadata::MetadataVersion::None,
                keys.root_keys().len() as u32,
                keys.root_keys().iter().map(|key| key.public()),
                local,
                &pm_repo,
            )
            .await
            .unwrap();
            client.update().await.unwrap();

            client
        };
        let database = client.database();
        let mut repo = RepoBuilder::from_database(&pm_repo, &keys, database);

        repo = repo
            .add_packages(
                self.packages.iter().map(|package| package.artifacts().join("manifest.json")),
            )
            .await
            .unwrap();
        repo.commit().await.unwrap();

        Ok(Repository { dir: repodir, delivery_blob_type: self.delivery_blob_type })
    }
}

/// Metadata for a package contained within a [`Repository`].
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct PackageEntry {
    path: String,
    meta_far_merkle: Hash,
    meta_far_size: usize,
}

pub(crate) fn iter_packages(
    reader: impl Read,
) -> Result<impl Iterator<Item = Result<PackageEntry, Error>>, Error> {
    // TODO when metadata is compatible, use rust-tuf instead.
    #[derive(Debug, Deserialize)]
    struct TargetsJson {
        signed: Targets,
    }
    #[derive(Debug, Deserialize)]
    struct Targets {
        targets: BTreeMap<String, Target>,
    }
    #[derive(Debug, Deserialize)]
    struct Target {
        custom: TargetCustom,
    }
    #[derive(Debug, Deserialize)]
    struct TargetCustom {
        merkle: String,
        size: usize,
    }

    let targets_json: TargetsJson = serde_json::from_reader(reader)?;

    Ok(targets_json.signed.targets.into_iter().map(|(path, target)| {
        Ok(PackageEntry {
            path,
            meta_far_merkle: target.custom.merkle.parse()?,
            meta_far_size: target.custom.size,
        })
    }))
}

/// A TUF repository generated by a [`RepositoryBuilder`].
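///
/// # Example
///
/// A minimal sketch of inspecting a built repository (assumes `repo` was
/// produced by `RepositoryBuilder::build()` above):
///
/// ```ignore
/// // Enumerate the content blobs and the packages recorded in targets.json.
/// let blobs = repo.list_blobs()?;
/// let packages = repo.list_packages()?;
/// ```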
#[derive(Debug)]
pub struct Repository {
    dir: TempDir,
    delivery_blob_type: DeliveryBlobType,
}

impl Repository {
    fn blobs_dir(&self) -> PathBuf {
        match self.delivery_blob_type {
            DeliveryBlobType::Type1 => self.dir.path().join("repository/blobs/1"),
            t => panic!("Unsupported delivery blob type: {:?}", t),
        }
    }

    /// Returns an iterator over all blobs contained in this repository.
    pub fn iter_blobs(&self) -> Result<impl Iterator<Item = Result<Hash, Error>>, Error> {
        Ok(fs::read_dir(self.blobs_dir())?
            .filter(|entry| entry.as_ref().map(|e| !e.path().is_dir()).unwrap_or(true))
            .map(|entry| {
                Ok(entry?
                    .file_name()
                    .to_str()
                    .ok_or_else(|| format_err!("non-utf8 file path"))?
                    .parse()?)
            }))
    }

    /// Returns a set of all blobs contained in this repository.
    pub fn list_blobs(&self) -> Result<BTreeSet<Hash>, Error> {
        self.iter_blobs()?.collect()
    }

    /// Removes the specified blobs from the repository.
    pub fn purge_blobs(&self, blobs: impl Iterator<Item = Hash>) {
        for blob in blobs {
            fs::remove_file(self.blobs_dir().join(format!("{blob}"))).unwrap();
        }
    }

    /// Reads the contents of the requested blob from the repository, decompressing it from its
    /// delivery blob form.
    pub fn read_blob(&self, merkle_root: &Hash) -> Result<Vec<u8>, Error> {
        let raw_blob = fs::read(self.blobs_dir().join(format!("{merkle_root}")))?;
        Ok(delivery_blob::decompress(&raw_blob)?)
    }

    /// Reads the contents of the requested delivery blob from the repository.
    pub fn read_delivery_blob(
        &self,
        delivery_blob_type: u32,
        merkle_root: &Hash,
    ) -> Result<Vec<u8>, io::Error> {
        fs::read(
            self.dir.path().join(format!("repository/blobs/{delivery_blob_type}/{merkle_root}")),
        )
    }

    /// Writes a blob with the given merkle and data (which may not match) into the repository's
    /// blobs directory, without any compression.
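    ///
    /// # Example
    ///
    /// A minimal sketch of simulating a corrupt blob in tests (the mismatched
    /// merkle/data pairing and the contents are hypothetical):
    ///
    /// ```ignore
    /// // Register data under a merkle root that does not actually match it.
    /// let merkle = fuchsia_merkle::root_from_slice(b"expected contents");
    /// let written_len = repo.write_blob(&merkle, b"corrupted contents")?;
    /// ```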
    pub fn write_blob(&self, merkle_root: &Hash, blob: &[u8]) -> Result<usize, Error> {
        let blob_data_to_write = match self.delivery_blob_type {
            DeliveryBlobType::Type1 => {
                delivery_blob::Type1Blob::generate(blob, delivery_blob::CompressionMode::Never)
            }
            t => panic!("Unsupported delivery blob type: {:?}", t),
        };
        let () = fs::write(self.blobs_dir().join(format!("{merkle_root}")), &blob_data_to_write)
            .with_context(|| format!("writing blob: {merkle_root}"))?;
        Ok(blob_data_to_write.len())
    }

    /// Overwrites the delivery blob for `merkle_root` with an uncompressed version regenerated
    /// from the blob already in the repository, returning the size of the new delivery blob.
    pub fn overwrite_uncompressed_delivery_blob(&self, merkle_root: &Hash) -> Result<usize, Error> {
        let blob =
            self.read_blob(merkle_root).with_context(|| format!("reading blob: {merkle_root}"))?;
        let delivery_blob =
            delivery_blob::Type1Blob::generate(&blob, delivery_blob::CompressionMode::Never);
        let () = fs::write(self.blobs_dir().join(format!("{merkle_root}")), &delivery_blob)?;
        Ok(delivery_blob.len())
    }

    /// Returns the path of the base of the repository.
    pub fn path(&self) -> PathBuf {
        self.dir.path().join("repository")
    }

    /// Returns an iterator over all packages contained in this repository.
    pub fn iter_packages(
        &self,
    ) -> Result<impl Iterator<Item = Result<PackageEntry, Error>>, Error> {
        iter_packages(io::BufReader::new(File::open(
            self.dir.path().join("repository/targets.json"),
        )?))
    }

    /// Returns a sorted vector of all packages contained in this repository.
    pub fn list_packages(&self) -> Result<Vec<PackageEntry>, Error> {
        let mut packages = self.iter_packages()?.collect::<Result<Vec<_>, _>>()?;
        packages.sort_unstable();
        Ok(packages)
    }

    /// Generates a [`RepositoryConfigBuilder`] suitable for configuring a
    /// package resolver to use this repository when it is served at the given
    /// URL.
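    ///
    /// # Example
    ///
    /// A minimal sketch (assumes `fuchsia_url::RepositoryUrl::parse` for
    /// constructing the URL; the host name is hypothetical):
    ///
    /// ```ignore
    /// let url = RepositoryUrl::parse("fuchsia-pkg://example.com")?;
    /// let config = repo.make_repo_config_builder(url).build();
    /// ```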
    pub fn make_repo_config_builder(&self, url: RepositoryUrl) -> RepositoryConfigBuilder {
        let mut builder = RepositoryConfigBuilder::new(url);

        for key in self.root_keys() {
            builder = builder.add_root_key(key);
        }

        builder
    }

    /// Generates a [`RepositoryConfig`] suitable for configuring a package resolver to use this
    /// repository when it is served at the given URL.
    pub fn make_repo_config(
        &self,
        url: RepositoryUrl,
        mirror_config: Option<MirrorConfig>,
        use_local_mirror: bool,
    ) -> RepositoryConfig {
        let mut builder = self.make_repo_config_builder(url);

        if let Some(mirror_config) = mirror_config {
            builder = builder.add_mirror(mirror_config)
        }

        builder.use_local_mirror(use_local_mirror).build()
    }

    /// Returns the root keys used by this repository.
    pub fn root_keys(&self) -> BTreeSet<RepositoryKey> {
        // TODO when metadata is compatible, use rust-tuf instead.
        #[derive(Debug, Deserialize)]
        struct RootJson {
            signed: Root,
        }
        #[derive(Debug, Deserialize)]
        struct Root {
            roles: BTreeMap<String, Role>,
            keys: BTreeMap<String, Key>,
        }
        #[derive(Debug, Deserialize)]
        struct Role {
            keyids: Vec<String>,
        }
        #[derive(Debug, Deserialize)]
        struct Key {
            keyval: KeyVal,
        }
        #[derive(Debug, Deserialize)]
        struct KeyVal {
            public: String,
        }

        let root_json: RootJson = serde_json::from_reader(io::BufReader::new(
            File::open(self.dir.path().join("repository/root.json")).unwrap(),
        ))
        .unwrap();
        let root = root_json.signed;

        root.roles["root"]
            .keyids
            .iter()
            .map(|keyid| {
                RepositoryKey::Ed25519(hex::decode(root.keys[keyid].keyval.public.clone()).unwrap())
            })
            .collect()
    }

    /// Serves the repository over HTTP using hyper.
    pub fn server(self: Arc<Self>) -> ServedRepositoryBuilder {
        ServedRepositoryBuilder::new(self)
    }
}

#[cfg(test)]
mod tests {

    use super::*;
    use crate::package::PackageBuilder;

    #[fuchsia_async::run_singlethreaded(test)]
    async fn test_repo_builder() {
        let same_contents = b"same contents";
        let repo = RepositoryBuilder::new()
            .delivery_blob_type(DeliveryBlobType::Type1)
            .add_package(
                PackageBuilder::new("rolldice")
                    .add_resource_at("bin/rolldice", "#!/boot/bin/sh\necho 4\n".as_bytes())
                    .add_resource_at(
                        "meta/rolldice.cml",
                        r#"{"program":{"binary":"bin/rolldice"}}"#.as_bytes(),
                    )
                    .add_resource_at("data/duplicate_a", "same contents".as_bytes())
                    .build()
                    .await
                    .unwrap(),
            )
            .add_package(
                PackageBuilder::new("fortune")
                    .add_resource_at(
                        "bin/fortune",
                        "#!/boot/bin/sh\necho ask again later\n".as_bytes(),
                    )
                    .add_resource_at(
                        "meta/fortune.cml",
                        r#"{"program":{"binary":"bin/fortune"}}"#.as_bytes(),
                    )
                    .add_resource_at("data/duplicate_b", &same_contents[..])
                    .add_resource_at("data/duplicate_c", &same_contents[..])
                    .build()
                    .await
                    .unwrap(),
            )
            .build()
            .await
            .unwrap();

        let mut blobs = repo.list_blobs().unwrap();
        // 2 meta FARs, 2 binaries, and 1 deduplicated resource blob shared by the duplicate files.
        assert_eq!(blobs.len(), 5);

        // Spot check the contents of a blob in the repo.
        let same_contents_merkle = fuchsia_merkle::root_from_slice(same_contents);
        assert_eq!(repo.read_blob(&same_contents_merkle).unwrap(), same_contents);
        assert_eq!(
            repo.read_delivery_blob(1, &same_contents_merkle).unwrap(),
            delivery_blob::generate(delivery_blob::DeliveryBlobType::Type1, same_contents)
        );

        let packages = repo.list_packages().unwrap();
        assert_eq!(
            packages.into_iter().map(|pkg| pkg.path).collect::<Vec<_>>(),
            vec!["fortune/0".to_owned(), "rolldice/0".to_owned()]
        );

        // Ensure purge_blobs purges blobs.
        let cutpoint = blobs.iter().nth(2).unwrap().to_owned();
        let removed = blobs.split_off(&cutpoint);
        repo.purge_blobs(removed.into_iter());
        assert_eq!(repo.list_blobs().unwrap(), blobs);
    }

    #[fuchsia_async::run_singlethreaded(test)]
    async fn test_repo_builder_template() -> Result<(), Error> {
        let repodir = tempfile::tempdir().context("create tempdir")?;

        // Populate repodir with a freshly created repository.
        let keys_dir = repodir.path().join("keys");
        fs::create_dir(&keys_dir).unwrap();
        let repo_keys = RepoKeys::generate(&keys_dir).unwrap();
        RepoBuilder::create(
            PmRepository::builder(repodir.path().to_owned().try_into()?).build(),
            &repo_keys,
        )
        .commit()
        .await
        .unwrap();

        // Build a repo from the template.
        let repo = RepositoryBuilder::from_template_dir(repodir.path())
            .add_package(PackageBuilder::new("test").build().await?)
            .build()
            .await?;

        // Ensure the repository used the generated keys.
        for path in &["root.json", "snapshot.json", "timestamp.json", "targets.json"] {
            assert_eq!(
                fs::read(repodir.path().join("keys").join(path))?,
                fs::read(repo.dir.path().join("keys").join(path))?,
            );
        }

        Ok(())
    }
}