use fidl::unpersist;
use fidl_cf_sc_internal_archivistconfig::Config as FidlConfig;
use fuchsia_inspect::{ArrayProperty, Node};
use fuchsia_runtime::{take_startup_handle, HandleInfo, HandleType};
/// Runtime view of the archivist's structured configuration.
///
/// Fields mirror the FIDL-generated
/// `fidl_cf_sc_internal_archivistconfig::Config` one-to-one; they are
/// populated from the component's config VMO by `take_from_startup_handle`.
#[derive(Debug)]
pub struct Config {
// NOTE(review): field semantics below are inferred from names only — the
// authoritative meanings live in the component's config declaration (.cml).
// Presumably selectors/monikers allowed to emit logs over serial.
pub allow_serial_logs: Vec<String>,
// Presumably service names the archivist should bind to.
pub bind_services: Vec<String>,
// Presumably initial log interest (severity) entries per component.
pub component_initial_interests: Vec<String>,
// Presumably log tags excluded from serial output.
pub deny_serial_log_tags: Vec<String>,
pub enable_klog: bool,
pub log_to_debuglog: bool,
pub logs_max_cached_original_bytes: u64,
pub maximum_concurrent_snapshots_per_reader: u64,
pub num_threads: u64,
pub per_component_batch_timeout_seconds: i64,
pub pipelines_path: String,
}
impl Config {
    /// Build-time checksum of the config declaration. The VMO embeds the same
    /// checksum so a stale or mismatched config image is detected at startup.
    /// Stored as a `const` array (the original rebuilt a `Vec` on every call).
    const EXPECTED_CHECKSUM: [u8; 32] = [
        0x96, 0x3f, 0xd8, 0xf0, 0x72, 0xfd, 0x27, 0x82, 0xde, 0x7d, 0x85, 0x1c, 0x6b, 0x70,
        0x2a, 0x0c, 0x09, 0x50, 0x05, 0xa7, 0x22, 0x13, 0x5f, 0x5d, 0xc3, 0xd6, 0x73, 0x21,
        0xf0, 0x43, 0x88, 0xe5,
    ];

    /// Takes the component's config VMO from the startup handle table,
    /// validates its embedded checksum against [`Self::EXPECTED_CHECKSUM`],
    /// and decodes the persisted FIDL payload into a [`Config`].
    ///
    /// # Panics
    ///
    /// Panics if the config VMO handle is absent or already taken, if the VMO
    /// is empty or unreadable, if the checksum does not match, or if the FIDL
    /// bytes fail to decode. All of these indicate a packaging/build
    /// invariant violation rather than a recoverable runtime error.
    pub fn take_from_startup_handle() -> Self {
        let config_vmo: zx::Vmo =
            take_startup_handle(HandleInfo::new(HandleType::ComponentConfigVmo, 0))
                .expect("Config VMO handle must be provided and cannot already have been taken.")
                .into();
        let config_size =
            config_vmo.get_content_size().expect("must be able to read config vmo content size");
        assert_ne!(config_size, 0, "config vmo must be non-empty");
        let config_bytes =
            config_vmo.read_to_vec(0, config_size).expect("must be able to read config vmo");
        // VMO layout: [u16 LE checksum length][checksum bytes][persisted FIDL struct].
        let checksum_length = u16::from_le_bytes([config_bytes[0], config_bytes[1]]) as usize;
        let fidl_start = 2 + checksum_length;
        let observed_checksum = &config_bytes[2..fidl_start];
        assert_eq!(
            observed_checksum,
            &Self::EXPECTED_CHECKSUM[..],
            "checksum from config VMO does not match expected checksum"
        );
        let fidl_config: FidlConfig = unpersist(&config_bytes[fidl_start..])
            .expect("must be able to parse bytes as config FIDL");
        Self {
            allow_serial_logs: fidl_config.allow_serial_logs,
            bind_services: fidl_config.bind_services,
            component_initial_interests: fidl_config.component_initial_interests,
            deny_serial_log_tags: fidl_config.deny_serial_log_tags,
            enable_klog: fidl_config.enable_klog,
            log_to_debuglog: fidl_config.log_to_debuglog,
            logs_max_cached_original_bytes: fidl_config.logs_max_cached_original_bytes,
            maximum_concurrent_snapshots_per_reader: fidl_config
                .maximum_concurrent_snapshots_per_reader,
            num_threads: fidl_config.num_threads,
            per_component_batch_timeout_seconds: fidl_config.per_component_batch_timeout_seconds,
            pipelines_path: fidl_config.pipelines_path,
        }
    }

    /// Records `values` as a string-array property named `name` on `node`.
    /// Factored out of `record_inspect`, which previously repeated this
    /// create/index-loop/record pattern four times with bounds-checked
    /// indexing instead of an iterator.
    fn record_string_list(node: &Node, name: &str, values: &[String]) {
        let arr = node.create_string_array(name, values.len());
        for (i, value) in values.iter().enumerate() {
            arr.set(i, value);
        }
        node.record(arr);
    }

    /// Records every configuration value under `inspector_node` so the active
    /// config is visible in Inspect diagnostics.
    pub fn record_inspect(&self, inspector_node: &Node) {
        Self::record_string_list(inspector_node, "allow_serial_logs", &self.allow_serial_logs);
        Self::record_string_list(inspector_node, "bind_services", &self.bind_services);
        Self::record_string_list(
            inspector_node,
            "component_initial_interests",
            &self.component_initial_interests,
        );
        Self::record_string_list(
            inspector_node,
            "deny_serial_log_tags",
            &self.deny_serial_log_tags,
        );
        inspector_node.record_bool("enable_klog", self.enable_klog);
        inspector_node.record_bool("log_to_debuglog", self.log_to_debuglog);
        inspector_node
            .record_uint("logs_max_cached_original_bytes", self.logs_max_cached_original_bytes);
        inspector_node.record_uint(
            "maximum_concurrent_snapshots_per_reader",
            self.maximum_concurrent_snapshots_per_reader,
        );
        inspector_node.record_uint("num_threads", self.num_threads);
        inspector_node.record_int(
            "per_component_batch_timeout_seconds",
            self.per_component_batch_timeout_seconds,
        );
        inspector_node.record_string("pipelines_path", &self.pipelines_path);
    }
}