1#![allow(clippy::let_unit_value)]
6
7use crate::args::{
8 Args, Command, GcCommand, GetHashCommand, OpenCommand, PkgStatusCommand, RepoAddCommand,
9 RepoAddFileCommand, RepoAddSubCommand, RepoAddUrlCommand, RepoCommand, RepoRemoveCommand,
10 RepoShowCommand, RepoSubCommand, ResolveCommand, RuleClearCommand, RuleCommand,
11 RuleDumpDynamicCommand, RuleListCommand, RuleReplaceCommand, RuleReplaceFileCommand,
12 RuleReplaceJsonCommand, RuleReplaceSubCommand, RuleSubCommand,
13};
14use anyhow::{Context as _, bail, format_err};
15use fetch_url::fetch_url;
16use fidl_fuchsia_pkg_rewrite::EngineMarker;
17use fidl_fuchsia_pkg_rewrite_ext::{Rule as RewriteRule, RuleConfig, do_transaction};
18use fuchsia_component::client::connect_to_protocol;
19use fuchsia_url::RepositoryUrl;
20use futures::stream::TryStreamExt;
21use std::fs::File;
22use std::io;
23use std::process::exit;
24use {
25 fidl_fuchsia_pkg as fpkg, fidl_fuchsia_pkg_ext as pkg,
26 fidl_fuchsia_pkg_garbagecollector as fpkg_gc, fuchsia_async as fasync,
27};
28
29mod args;
30
31pub fn main() -> Result<(), anyhow::Error> {
32 let mut executor = fasync::LocalExecutorBuilder::new().build();
33 let Args { command } = argh::from_env();
34 exit(executor.run_singlethreaded(main_helper(command))?)
35}
36
/// Dispatches a parsed `Command` to the matching package-management FIDL
/// service and returns the process exit code to report.
///
/// Exit codes produced here: `0` on success everywhere; `pkg-status`
/// additionally returns `3` (package not in any registered TUF repo) and
/// `2` (in a repo but not on disk); `repo show` returns `1` when the named
/// repository is not registered. Any other failure is surfaced as an
/// `anyhow::Error` to the caller.
async fn main_helper(command: Command) -> Result<i32, anyhow::Error> {
    match command {
        Command::Resolve(ResolveCommand { pkg_url, verbose }) => {
            let resolver = connect_to_protocol::<fpkg::PackageResolverMarker>()
                .context("Failed to connect to resolver service")?;
            println!("resolving {pkg_url}");

            // Server end is handed to the resolver, which serves the
            // resolved package's directory on it.
            let (dir, dir_server_end) = fidl::endpoints::create_proxy();

            // The resolution context is intentionally discarded; this tool
            // only needs the package directory.
            let _: fpkg::ResolutionContext = resolver
                .resolve(&pkg_url, dir_server_end)
                .await?
                .map_err(fidl_fuchsia_pkg_ext::ResolveError::from)
                .with_context(|| format!("Failed to resolve {pkg_url}"))?;

            if verbose {
                // Stream entries instead of collecting so large packages
                // print incrementally.
                println!("package contents:");
                let mut stream =
                    fuchsia_fs::directory::readdir_recursive(&dir, None);
                while let Some(entry) = stream.try_next().await? {
                    println!("/{}", entry.name);
                }
            }

            Ok(0)
        }
        Command::GetHash(GetHashCommand { pkg_url }) => {
            let resolver = connect_to_protocol::<fpkg::PackageResolverMarker>()
                .context("Failed to connect to resolver service")?;
            // GetHash reports failure as a raw zx status integer; decode it
            // for the error message.
            let blob_id =
                resolver.get_hash(&fpkg::PackageUrl { url: pkg_url }).await?.map_err(|i| {
                    format_err!(
                        "Failed to get package hash with error: {}",
                        zx::Status::from_raw(i)
                    )
                })?;
            println!("{}", pkg::BlobId::from(blob_id));
            Ok(0)
        }
        Command::PkgStatus(PkgStatusCommand { pkg_url }) => {
            let resolver = connect_to_protocol::<fpkg::PackageResolverMarker>()
                .context("Failed to connect to resolver service")?;
            // Step 1: is the package known to any registered TUF repo?
            let blob_id = match resolver.get_hash(&fpkg::PackageUrl { url: pkg_url }).await? {
                Ok(blob_id) => pkg::BlobId::from(blob_id),
                Err(status) => match zx::Status::from_raw(status) {
                    // NOT_FOUND means "not in a repo" — a distinct exit
                    // code (3), not an error.
                    zx::Status::NOT_FOUND => {
                        println!("Package in registered TUF repo: no");
                        println!("Package on disk: unknown (did not check since not in tuf repo)");
                        return Ok(3);
                    }
                    other_failure_status => {
                        bail!(
                            "Cannot determine pkg status. Failed fuchsia.pkg.PackageResolver.GetHash with unexpected status: {:?}",
                            other_failure_status
                        );
                    }
                },
            };
            println!("Package in registered TUF repo: yes (merkle={blob_id})");

            let cache = pkg::cache::Client::from_proxy(
                connect_to_protocol::<fpkg::PackageCacheMarker>()
                    .context("Failed to connect to cache service")?,
            );

            // Step 2: is the package's content already on disk?
            match cache.get_already_cached(blob_id).await {
                Ok(_) => {}
                // "Not cached" is an expected outcome with its own exit
                // code (2); any other cache error is fatal.
                Err(e) if e.was_not_cached() => {
                    println!("Package on disk: no");
                    return Ok(2);
                }
                Err(e) => {
                    bail!(
                        "Cannot determine pkg status. Failed fuchsia.pkg.PackageCache.Get: {:?}",
                        e
                    );
                }
            }
            println!("Package on disk: yes");
            Ok(0)
        }
        Command::Open(OpenCommand { meta_far_blob_id }) => {
            let cache = pkg::cache::Client::from_proxy(
                connect_to_protocol::<fpkg::PackageCacheMarker>()
                    .context("Failed to connect to cache service")?,
            );
            println!("opening {meta_far_blob_id}");

            // Only works for packages already cached locally; listing is
            // collected eagerly before printing.
            let dir = cache.get_already_cached(meta_far_blob_id).await?.into_proxy();
            let entries = fuchsia_fs::directory::readdir_recursive(&dir, None)
                .try_collect::<Vec<_>>()
                .await?;
            println!("package contents:");
            for entry in entries {
                println!("/{}", entry.name);
            }

            Ok(0)
        }
        Command::Repo(RepoCommand { verbose, subcommand }) => {
            let repo_manager = connect_to_protocol::<fpkg::RepositoryManagerMarker>()
                .context("Failed to connect to resolver service")?;

            match subcommand {
                // No subcommand: list registered repos — URLs only by
                // default, full JSON configs with --verbose.
                None => {
                    if !verbose {
                        let repos = fetch_repos(repo_manager).await?;

                        let mut urls =
                            repos.into_iter().map(|r| r.repo_url().to_string()).collect::<Vec<_>>();
                        urls.sort_unstable();
                        urls.into_iter().for_each(|url| println!("{url}"));
                    } else {
                        let repos = fetch_repos(repo_manager).await?;

                        let s = serde_json::to_string_pretty(&repos).expect("valid json");
                        println!("{s}");
                    }
                    Ok(0)
                }
                Some(RepoSubCommand::Add(RepoAddCommand { subcommand })) => {
                    match subcommand {
                        // Add a repo config from a local JSON file.
                        RepoAddSubCommand::File(RepoAddFileCommand { persist, name, file }) => {
                            let mut repo: pkg::RepositoryConfig =
                                serde_json::from_reader(io::BufReader::new(File::open(file)?))?;
                            // Optional --name overrides the repo URL host
                            // from the config file.
                            if let Some(n) = name {
                                repo = pkg::RepositoryConfigBuilder::from(repo)
                                    .repo_url(RepositoryUrl::parse_host(n)?)
                                    .build();
                            }
                            // Optional --persist makes the registration
                            // survive reboot.
                            if persist {
                                repo = pkg::RepositoryConfigBuilder::from(repo)
                                    .repo_storage_type(pkg::RepositoryStorageType::Persistent)
                                    .build();
                            }

                            let res = repo_manager.add(&repo.into()).await?;
                            let () = res.map_err(zx::Status::from_raw)?;
                        }
                        // Add a repo config fetched over HTTP(S); same
                        // --name / --persist overrides as the file path.
                        RepoAddSubCommand::Url(RepoAddUrlCommand { persist, name, repo_url }) => {
                            let res = fetch_url(repo_url, None).await?;
                            let mut repo: pkg::RepositoryConfig = serde_json::from_slice(&res)?;
                            if let Some(n) = name {
                                repo = pkg::RepositoryConfigBuilder::from(repo)
                                    .repo_url(RepositoryUrl::parse_host(n)?)
                                    .build();
                            }
                            if persist {
                                repo = pkg::RepositoryConfigBuilder::from(repo)
                                    .repo_storage_type(pkg::RepositoryStorageType::Persistent)
                                    .build();
                            }

                            let res = repo_manager.add(&repo.into()).await?;
                            let () = res.map_err(zx::Status::from_raw)?;
                        }
                    }

                    Ok(0)
                }

                Some(RepoSubCommand::Remove(RepoRemoveCommand { repo_url })) => {
                    let res = repo_manager.remove(&repo_url).await?;
                    let () = res.map_err(zx::Status::from_raw)?;

                    Ok(0)
                }

                Some(RepoSubCommand::Show(RepoShowCommand { repo_url })) => {
                    // Linear scan over all registered repos; exact string
                    // match on the repo URL.
                    let repos = fetch_repos(repo_manager).await?;
                    for repo in repos.into_iter() {
                        if repo.repo_url().to_string() == repo_url {
                            let s = serde_json::to_string_pretty(&repo).expect("valid json");
                            println!("{s}");
                            return Ok(0);
                        }
                    }

                    // Not-found is reported on stdout with exit code 1,
                    // not as an error.
                    println!("Package repository not found: {repo_url:?}");
                    Ok(1)
                }
            }
        }
        Command::Rule(RuleCommand { subcommand }) => {
            let engine = connect_to_protocol::<EngineMarker>()
                .context("Failed to connect to rewrite engine service")?;

            match subcommand {
                RuleSubCommand::List(RuleListCommand {}) => {
                    let (iter, iter_server_end) = fidl::endpoints::create_proxy();
                    engine.list(iter_server_end)?;

                    // Drain the iterator: an empty batch signals the end of
                    // the rule list.
                    let mut rules = Vec::new();
                    loop {
                        let more = iter.next().await?;
                        if more.is_empty() {
                            break;
                        }
                        rules.extend(more);
                    }
                    // Convert FIDL rules to the ext-crate type, failing on
                    // the first invalid rule.
                    let rules = rules.into_iter().map(|rule| rule.try_into()).collect::<Result<
                        Vec<RewriteRule>,
                        _,
                    >>(
                    )?;

                    for rule in rules {
                        println!("{rule:#?}");
                    }
                }
                RuleSubCommand::Clear(RuleClearCommand {}) => {
                    // do_transaction retries the edit on conflict; the
                    // closure just resets all dynamic rules.
                    do_transaction(&engine, |transaction| async move {
                        transaction.reset_all()?;
                        Ok(transaction)
                    })
                    .await?;
                }
                RuleSubCommand::DumpDynamic(RuleDumpDynamicCommand {}) => {
                    // Open a read-only edit transaction solely to list the
                    // dynamic rules; it is never committed.
                    let (transaction, transaction_server_end) = fidl::endpoints::create_proxy();
                    let () = engine.start_edit_transaction(transaction_server_end)?;
                    let (iter, iter_server_end) = fidl::endpoints::create_proxy();
                    transaction.list_dynamic(iter_server_end)?;
                    let mut rules = Vec::new();
                    loop {
                        let more = iter.next().await?;
                        if more.is_empty() {
                            break;
                        }
                        rules.extend(more);
                    }
                    let rules = rules.into_iter().map(|rule| rule.try_into()).collect::<Result<
                        Vec<RewriteRule>,
                        _,
                    >>(
                    )?;
                    // Wrap in the versioned config schema so the output can
                    // be fed back to `rule replace`.
                    let rule_configs = RuleConfig::Version1(rules);
                    let dynamic_rules = serde_json::to_string_pretty(&rule_configs)?;
                    println!("{dynamic_rules}");
                }
                RuleSubCommand::Replace(RuleReplaceCommand { subcommand }) => {
                    // The replacement rule set comes either from a JSON file
                    // or inline JSON on the command line.
                    let RuleConfig::Version1(ref rules) = match subcommand {
                        RuleReplaceSubCommand::File(RuleReplaceFileCommand { file }) => {
                            serde_json::from_reader(io::BufReader::new(File::open(file)?))?
                        }
                        RuleReplaceSubCommand::Json(RuleReplaceJsonCommand { config }) => config,
                    };

                    do_transaction(&engine, |transaction| {
                        async move {
                            transaction.reset_all()?;
                            // NOTE(review): rules are added in reverse —
                            // presumably `add` prepends (highest priority
                            // first), so this preserves the config file's
                            // ordering; confirm against Engine docs.
                            for rule in rules.iter().rev() {
                                let () = transaction.add(rule.clone()).await?;
                            }
                            Ok(transaction)
                        }
                    })
                    .await?;
                }
            }

            Ok(0)
        }
        Command::Gc(GcCommand {}) => {
            // Trigger garbage collection of unreferenced blobs; success maps
            // to exit code 0.
            let space_manager = connect_to_protocol::<fpkg_gc::ManagerMarker>()
                .context("Failed to connect to space manager service")?;
            space_manager
                .gc()
                .await?
                .map_err(|err| format_err!("Garbage collection failed with error: {:?}", err))
                .map(|_| 0i32)
        }
    }
}
321
322async fn fetch_repos(
323 repo_manager: fpkg::RepositoryManagerProxy,
324) -> Result<Vec<pkg::RepositoryConfig>, anyhow::Error> {
325 let (iter, server_end) = fidl::endpoints::create_proxy();
326 repo_manager.list(server_end)?;
327 let mut repos = vec![];
328
329 loop {
330 let chunk = iter.next().await?;
331 if chunk.is_empty() {
332 break;
333 }
334 repos.extend(chunk);
335 }
336
337 repos
338 .into_iter()
339 .map(|repo| pkg::RepositoryConfig::try_from(repo).map_err(anyhow::Error::from))
340 .collect()
341}