1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
use fidl::encoding::unpersist;
use fidl_cf_sc_internal_archivistconfig::Config as FidlConfig;
use fuchsia_inspect::{ArrayProperty, Node};
use fuchsia_runtime::{take_startup_handle, HandleInfo, HandleType};
use fuchsia_zircon as zx;
/// Resolved structured configuration for the Archivist component, decoded from
/// the config VMO handed to the component at startup.
#[derive(Debug)]
pub struct Config {
// Service names Archivist should bind to on startup.
pub bind_services: Vec<String>,
// Whether the ComponentEventProvider-based event pipeline is enabled.
pub enable_component_event_provider: bool,
// Whether the EventSource-based event pipeline is enabled.
pub enable_event_source: bool,
// Whether kernel debuglog messages are ingested.
pub enable_klog: bool,
// Whether the LogConnector protocol is served.
pub enable_log_connector: bool,
// Whether incoming logs are treated as unattributed.
pub is_unattributed: bool,
// Whether Archivist's own logs are forwarded to the kernel debuglog.
pub log_to_debuglog: bool,
// Cap on cached original (pre-redaction) log bytes.
pub logs_max_cached_original_bytes: u64,
// Cap on concurrent snapshot requests served per reader.
pub maximum_concurrent_snapshots_per_reader: u64,
// Number of threads Archivist runs with.
pub num_threads: u64,
// Directory path containing pipeline configuration files.
pub pipelines_path: String,
}
impl Config {
    /// Takes this component's config VMO from its startup handles, verifies the
    /// embedded checksum against the compiled-in expected value, and decodes the
    /// persistent FIDL payload into a `Config`.
    ///
    /// VMO layout: `[u16 LE checksum length][checksum bytes][persistent FIDL message]`.
    ///
    /// # Panics
    ///
    /// Panics if the VMO handle is missing or already taken, the VMO is empty,
    /// unreadable, or too small for its declared checksum, the checksum does not
    /// match, or the FIDL payload fails to decode.
    pub fn take_from_startup_handle() -> Self {
        let config_vmo: zx::Vmo =
            take_startup_handle(HandleInfo::new(HandleType::ComponentConfigVmo, 0))
                .expect("Config VMO handle must be provided and cannot already have been taken.")
                .into();
        let config_size =
            config_vmo.get_content_size().expect("must be able to read config vmo content size");
        assert_ne!(config_size, 0, "config vmo must be non-empty");
        // Allocate the zeroed buffer in one step (avoids Vec::new + resize).
        let mut config_bytes = vec![0u8; config_size as usize];
        config_vmo.read(&mut config_bytes, 0).expect("must be able to read config vmo");
        // First two bytes: little-endian length of the checksum that follows.
        let checksum_length = u16::from_le_bytes([config_bytes[0], config_bytes[1]]) as usize;
        let fidl_start = 2 + checksum_length;
        // Guard the slice below so a malformed VMO fails with a clear message
        // instead of an opaque slice-index panic.
        assert!(
            config_bytes.len() >= fidl_start,
            "config vmo too small for declared checksum length"
        );
        let observed_checksum = &config_bytes[2..fidl_start];
        // Compile-time constant; a const slice avoids re-allocating a Vec on
        // every call.
        const EXPECTED_CHECKSUM: &[u8] = &[
            0x9f, 0xb5, 0x02, 0xb1, 0x9c, 0x9f, 0x46, 0x0f, 0xd4, 0xc0, 0xbb, 0x23, 0x2e, 0xed,
            0x96, 0xa0, 0x43, 0x3f, 0x3f, 0x1a, 0xc3, 0x57, 0x3f, 0x1a, 0x2a, 0x62, 0xc8, 0x6b,
            0x87, 0x6a, 0x82, 0x5d,
        ];
        assert_eq!(
            observed_checksum, EXPECTED_CHECKSUM,
            "checksum from config VMO does not match expected checksum"
        );
        // Remainder of the buffer is the persistent FIDL message.
        let fidl_config: FidlConfig = unpersist(&config_bytes[fidl_start..])
            .expect("must be able to parse bytes as config FIDL");
        Self {
            bind_services: fidl_config.bind_services,
            enable_component_event_provider: fidl_config.enable_component_event_provider,
            enable_event_source: fidl_config.enable_event_source,
            enable_klog: fidl_config.enable_klog,
            enable_log_connector: fidl_config.enable_log_connector,
            is_unattributed: fidl_config.is_unattributed,
            log_to_debuglog: fidl_config.log_to_debuglog,
            logs_max_cached_original_bytes: fidl_config.logs_max_cached_original_bytes,
            maximum_concurrent_snapshots_per_reader: fidl_config
                .maximum_concurrent_snapshots_per_reader,
            num_threads: fidl_config.num_threads,
            pipelines_path: fidl_config.pipelines_path,
        }
    }

    /// Records every resolved config field under `inspector_node` so the active
    /// configuration is visible in Inspect snapshots.
    pub fn record_inspect(&self, inspector_node: &Node) {
        let arr = inspector_node.create_string_array("bind_services", self.bind_services.len());
        // enumerate() instead of an index loop (clippy::needless_range_loop).
        for (i, service) in self.bind_services.iter().enumerate() {
            arr.set(i, service);
        }
        inspector_node.record(arr);
        inspector_node
            .record_bool("enable_component_event_provider", self.enable_component_event_provider);
        inspector_node.record_bool("enable_event_source", self.enable_event_source);
        inspector_node.record_bool("enable_klog", self.enable_klog);
        inspector_node.record_bool("enable_log_connector", self.enable_log_connector);
        inspector_node.record_bool("is_unattributed", self.is_unattributed);
        inspector_node.record_bool("log_to_debuglog", self.log_to_debuglog);
        inspector_node
            .record_uint("logs_max_cached_original_bytes", self.logs_max_cached_original_bytes);
        inspector_node.record_uint(
            "maximum_concurrent_snapshots_per_reader",
            self.maximum_concurrent_snapshots_per_reader,
        );
        inspector_node.record_uint("num_threads", self.num_threads);
        inspector_node.record_string("pipelines_path", &self.pipelines_path);
    }
}