starnix_core/fs/fuchsia/
sync_file.rs

1// Copyright 2023 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use crate::fs::fuchsia::RemoteCounter;
6use crate::mm::MemoryAccessorExt;
7use crate::task::{
8    CurrentTask, EventHandler, ManyZxHandleSignalHandler, SignalHandler, SignalHandlerInner,
9    WaitCanceler, Waiter,
10};
11use crate::vfs::buffers::{InputBuffer, OutputBuffer};
12use crate::vfs::{
13    Anon, FdFlags, FdNumber, FileObject, FileOps, fileops_impl_nonseekable, fileops_impl_noop_sync,
14};
15
16use starnix_lifecycle::AtomicUsizeCounter;
17use starnix_logging::{CATEGORY_STARNIX, impossible_error, log_warn, trace_duration};
18use starnix_sync::{FileOpsCore, Locked, Unlocked};
19use starnix_syscalls::{SUCCESS, SyscallArg, SyscallResult};
20use starnix_uapi::errors::Errno;
21use starnix_uapi::open_flags::OpenFlags;
22use starnix_uapi::user_address::{UserAddress, UserRef};
23use starnix_uapi::vfs::FdEvents;
24use starnix_uapi::{
25    SYNC_IOC_MAGIC, c_char, errno, error, sync_fence_info, sync_file_info, sync_merge_data,
26};
27use std::collections::HashSet;
28use std::sync::Arc;
29use zx::{AsHandleRef, HandleBased};
30
31// Implementation of the sync framework described at:
32// https://source.android.com/docs/core/graphics/sync
33//
34// A sync point "is a single value or point on a sync_timeline. A point has three states: active,
35// signaled, and error. Points start in the active state and transition to the signaled or error
36// states."  A timestamp of the state transition is returned by the ioctl SYNC_IOC_FILE_INFO,
// so we use zx::Counter objects to implement the sync point.  The timestamp is stored as the
// counter's value and should be stored before the object signal state change.  This timestamp is always
39// early; while in most cases only by a slight amount, the difference could be substantial if the
40// signaling thread is de-scheduled in the middle of the two syscalls.
41// TODO(b/305781995) - use events instead of VMOs.
42//
43
// ioctl numbers within the SYNC_IOC_MAGIC type; these match the Linux sync_file uapi
// (SYNC_IOC_MERGE combines two fences, SYNC_IOC_FILE_INFO queries fence status).
const SYNC_IOC_MERGE: u8 = 3;
const SYNC_IOC_FILE_INFO: u8 = 4;
46
/// The producer ("timeline") that a sync point belongs to. Reported to userspace as the
/// `driver_name` field of `sync_fence_info` in `SYNC_IOC_FILE_INFO`.
#[derive(Clone, Debug)]
pub enum Timeline {
    /// Sync point produced by Magma (the GPU driver stack).
    Magma,
    /// Sync point produced via a remote counter (HWC path).
    Hwc,
}
52
/// Signal state of a sync point, using the numeric values the sync uapi expects in
/// `sync_fence_info::status` and `sync_file_info::status`.
#[derive(PartialEq, Copy, Clone)]
// Error status (-1) is not currently used.
pub enum Status {
    /// The point has not yet been signaled.
    Active = 0,
    /// The point has been signaled; a timestamp is available from its counter.
    Signaled = 1,
}
59
/// A single point on a sync timeline, backed by a zircon counter.
///
/// The counter is signaled (`COUNTER_SIGNALED`) when the point fires, and its value holds
/// the signal timestamp in nanoseconds. `Arc` allows the same point to appear in multiple
/// fences after a merge.
#[derive(Clone)]
pub struct SyncPoint {
    pub timeline: Timeline,
    pub counter: Arc<zx::Counter>,
}
65
66impl SyncPoint {
67    pub fn new(timeline: Timeline, counter: zx::Counter) -> SyncPoint {
68        SyncPoint { timeline, counter: Arc::new(counter) }
69    }
70}
71
/// A collection of sync points. The fence as a whole is considered signaled only once
/// every contained point is signaled (see `SyncFile::query_events`).
pub struct SyncFence {
    pub sync_points: Vec<SyncPoint>,
}
75
/// File object implementing the Android sync_file interface (poll + `SYNC_IOC_*` ioctls)
/// over a fence of zircon counters.
pub struct SyncFile {
    /// Name reported back to userspace via `SYNC_IOC_FILE_INFO`.
    pub name: [u8; 32],
    /// The fence this file polls and reports on.
    pub fence: SyncFence,
}
80
/// Point-in-time snapshot of one sync point's state.
struct FenceState {
    status: Status,
    // Signal timestamp read from the counter; 0 while the point is still active.
    timestamp_ns: u64,
}
85
86impl SyncFile {
87    const SIGNALS: zx::Signals = zx::Signals::COUNTER_SIGNALED;
88
89    pub fn new(name: [u8; 32], fence: SyncFence) -> SyncFile {
90        SyncFile { name, fence }
91    }
92
93    fn get_fence_state(&self) -> Vec<FenceState> {
94        let mut state: Vec<FenceState> = vec![];
95
96        for sync_point in &self.fence.sync_points {
97            if sync_point.counter.wait_one(Self::SIGNALS, zx::MonotonicInstant::ZERO).to_result()
98                == Err(zx::Status::TIMED_OUT)
99            {
100                state.push(FenceState { status: Status::Active, timestamp_ns: 0 });
101            } else {
102                state.push(FenceState {
103                    status: Status::Signaled,
104                    timestamp_ns: sync_point.counter.read().unwrap() as u64,
105                });
106            }
107        }
108        state
109    }
110}
111
// FileOps implementation exposing the fence to userspace: poll readiness (POLLIN when all
// points are signaled) and the SYNC_IOC_MERGE / SYNC_IOC_FILE_INFO ioctls.
impl FileOps for SyncFile {
    fileops_impl_nonseekable!();
    fileops_impl_noop_sync!();

    /// Exposes the underlying counter as a zircon handle. Only supported when the fence
    /// contains exactly one sync point, since a single handle cannot represent several.
    fn to_handle(
        &self,
        _file: &FileObject,
        _current_task: &CurrentTask,
    ) -> Result<Option<zx::NullableHandle>, Errno> {
        if self.fence.sync_points.len() != 1 {
            log_warn!(
                "SyncFile::to_handle failed: multiple sync points ({}) not supported: {:?}",
                self.fence.sync_points.len(),
                self.fence.sync_points.iter().map(|p| p.timeline.clone()).collect::<Vec<_>>()
            );
            return error!(ENOTSUP);
        }
        // Duplicating with SAME_RIGHTS can only fail on an invalid handle, which would be a
        // bug here rather than a runtime condition.
        let dup = self.fence.sync_points[0]
            .counter
            .duplicate_handle(zx::Rights::SAME_RIGHTS)
            .map_err(impossible_error)?;
        Ok(Some(dup.into()))
    }

    /// Dispatches the SYNC_IOC_* ioctls. `arg` is a userspace pointer to the ioctl's data
    /// struct (`sync_merge_data` or `sync_file_info` respectively).
    fn ioctl(
        &self,
        locked: &mut Locked<Unlocked>,
        _file: &FileObject,
        current_task: &CurrentTask,
        request: u32,
        arg: SyscallArg,
    ) -> Result<SyscallResult, Errno> {
        let user_addr = UserAddress::from(arg);
        // Decode the _IOC type (bits 8..16) and number (bits 0..8) fields of the request.
        let ioctl_type = (request >> 8) as u8;
        let ioctl_number = request as u8;

        if ioctl_type != SYNC_IOC_MAGIC {
            log_warn!("Unexpected type {:?}", ioctl_type);
            return error!(EINVAL);
        }

        match ioctl_number {
            SYNC_IOC_MERGE => {
                trace_duration!(CATEGORY_STARNIX, "SyncFileMerge");
                let user_ref = UserRef::new(user_addr);
                let mut merge_data: sync_merge_data = current_task.read_object(user_ref)?;
                let file2 = current_task.files.get(FdNumber::from_raw(merge_data.fd2))?;

                // Build the merged fence, deduplicating sync points by counter koid so a
                // point shared by both files appears only once.
                let mut fence = SyncFence { sync_points: vec![] };
                let mut set = HashSet::<zx::Koid>::new();

                for sync_point in &self.fence.sync_points {
                    let koid = sync_point.counter.get_koid().unwrap();
                    if set.insert(koid) {
                        fence.sync_points.push(sync_point.clone());
                    }
                }

                // The second fd may be another SyncFile (merge all its points) or a
                // RemoteCounter (wrap its duplicated counter as a single Hwc point).
                if let Some(file2) = file2.downcast_file::<SyncFile>() {
                    for sync_point in &file2.fence.sync_points {
                        let koid = sync_point.counter.get_koid().unwrap();
                        if set.insert(koid) {
                            fence.sync_points.push(sync_point.clone());
                        }
                    }
                } else if let Some(file2) = file2.downcast_file::<RemoteCounter>() {
                    let counter = file2.duplicate_handle()?;
                    let koid = counter.get_koid().map_err(impossible_error)?;
                    if set.insert(koid) {
                        fence.sync_points.push(SyncPoint::new(Timeline::Hwc, counter.into()));
                    }
                } else {
                    return error!(EINVAL);
                }

                // Remove sync points that are already signaled.
                //
                // `i` is deliberately not advanced after a removal, so while the leading
                // points are all signaled every removal happens at index 0. In the case
                // where *all* points end up removed (the only case in which the fallback
                // below is reached), every removal occurs at index 0 and the `i == 0` /
                // `>=` tracking below therefore keeps the removed point with the greatest
                // timestamp.
                let mut i = 0 as usize;
                let mut last_signaled_timestamp_ns = 0;
                let mut last_signaled_sync_point: Option<SyncPoint> = None;
                while i < fence.sync_points.len() {
                    if fence.sync_points[i]
                        .counter
                        .wait_one(Self::SIGNALS, zx::MonotonicInstant::ZERO)
                        .to_result()
                        != Err(zx::Status::TIMED_OUT)
                    {
                        let timestamp_ns =
                            fence.sync_points[i].counter.read().map_err(|_| errno!(EIO))?;
                        let removed = fence.sync_points.remove(i);
                        if i == 0 && timestamp_ns >= last_signaled_timestamp_ns {
                            last_signaled_timestamp_ns = timestamp_ns;
                            last_signaled_sync_point = Some(removed);
                        }
                        continue;
                    }
                    i += 1;
                }
                if fence.sync_points.is_empty() {
                    // Every point was already signaled: keep the most recently signaled one
                    // so the merged fence still reports a meaningful timestamp.
                    // NOTE(review): this panics if both input fences were empty — presumably
                    // callers guarantee at least one sync point; confirm.
                    fence.sync_points.push(last_signaled_sync_point.expect("No sync points left."));
                }

                // `sync_merge_data.name` is declared as c_char; convert to the u8 bytes
                // SyncFile stores.
                let name = merge_data.name.map(|x| x as u8);
                // TODO: https://fxbug.dev/407611229 - Verify whether "sync_file" should be private.
                let file = Anon::new_private_file(
                    locked,
                    current_task,
                    Box::new(SyncFile::new(name, fence)),
                    OpenFlags::RDWR,
                    "sync_file",
                );

                // Return the new fd to userspace through the `fence` field of the struct.
                let fd = current_task.add_file(locked, file, FdFlags::empty())?;
                merge_data.fence = fd.raw();

                current_task.write_object(user_ref, &merge_data)?;
                Ok(SUCCESS)
            }
            SYNC_IOC_FILE_INFO => {
                trace_duration!(CATEGORY_STARNIX, "SyncFileInfo");
                let user_ref = UserRef::new(user_addr);
                let mut info: sync_file_info = current_task.read_object(user_ref)?;

                // Copy this file's name out byte-by-byte (c_char may be i8 or u8 depending
                // on the target).
                for i in 0..self.name.len() {
                    info.name[i] = self.name[i] as c_char;
                }
                info.status = 0;

                if info.num_fences == 0 {
                    // num_fences == 0 is a size query: report the point count and write no
                    // per-fence records.
                    // NOTE(review): `info.status` is left 0 (unsignaled) on this path even
                    // if all points are signaled — confirm that is the intended contract.
                    info.num_fences = self.fence.sync_points.len() as u32;
                } else if info.num_fences > self.fence.sync_points.len() as u32 {
                    return error!(EINVAL);
                } else {
                    let fence_state = self.get_fence_state();
                    let mut user_addr = info.sync_fence_info;

                    // The file as a whole is signaled (status 1) only if every point is.
                    let mut sync_file_status = 1;
                    for (i, state) in fence_state.iter().enumerate() {
                        if state.status == Status::Active {
                            sync_file_status = 0;
                        }
                        // Write out at most `num_fences` records, but keep iterating over
                        // all points so `sync_file_status` reflects the whole fence.
                        if i < info.num_fences as usize {
                            // Note: obj_name not supported.
                            let mut fence_info = sync_fence_info {
                                status: state.status as i32,
                                timestamp_ns: state.timestamp_ns,
                                ..sync_fence_info::default()
                            };
                            // Fixed-width, NUL-padded driver names per timeline.
                            let driver_name = match self.fence.sync_points[i].timeline {
                                Timeline::Magma => b"Magma\0",
                                Timeline::Hwc => b"Hwc\0\0\0",
                            };
                            assert!(driver_name.len() <= fence_info.driver_name.len());
                            for i in 0..driver_name.len() {
                                fence_info.driver_name[i] = driver_name[i] as c_char;
                            }

                            // Records are written to the userspace array sequentially.
                            let fence_user_ref = UserRef::new(UserAddress::from(user_addr));
                            user_addr += std::mem::size_of::<sync_fence_info>() as u64;

                            current_task.write_object(fence_user_ref, &fence_info)?;
                        }
                    }

                    info.status = sync_file_status;
                }

                current_task.write_object(user_ref, &info)?;
                Ok(SUCCESS)
            }
            _ => {
                error!(EINVAL)
            }
        }
    }

    /// Arms a wait that fires POLLIN once *all* sync points are signaled. The shared
    /// `ManyZxHandleSignalHandler` counts per-point signals up to the fence size.
    fn wait_async(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
        waiter: &Waiter,
        events: FdEvents,
        event_handler: EventHandler,
    ) -> Option<WaitCanceler> {
        // POLLIN (fence completion) is the only event this file can report.
        if !events.contains(FdEvents::POLLIN) {
            return None;
        }

        // Shared count of how many points have signaled so far.
        let count = Arc::<AtomicUsizeCounter>::new(0.into());

        let mut canceler = WaitCanceler::new_noop();

        for sync_point in &self.fence.sync_points {
            let signal_handler = SignalHandler {
                inner: SignalHandlerInner::ManyZxHandle(ManyZxHandleSignalHandler {
                    count: self.fence.sync_points.len(),
                    counter: count.clone(),
                    expected_signals: Self::SIGNALS,
                    events: FdEvents::POLLIN,
                }),
                event_handler: event_handler.clone(),
                err_code: None,
            };

            let canceler_result = waiter.wake_on_zircon_signals(
                sync_point.counter.as_ref(),
                Self::SIGNALS,
                signal_handler,
            );
            let canceler_result = match canceler_result {
                Ok(o) => o,
                Err(e) => {
                    log_warn!("Error returned from wake_on_zircon_signals: {:?}", e);
                    return None;
                }
            };

            // The wakeup is edge triggered, so handles that were already signaled will never get
            // a callback. Normally the "already signaled" case is handled by a call to
            // query_events() after this query_async() returns; however that works only if all
            // handles are signaled.  Here we perform the counting, and cancel waits, for any
            // handles currently signaled.
            if sync_point.counter.wait_one(Self::SIGNALS, zx::MonotonicInstant::ZERO).to_result()
                == Err(zx::Status::TIMED_OUT)
            {
                // Still unsignaled: keep this wait alive and fold its canceler in.
                canceler = WaitCanceler::merge_unbounded(
                    canceler,
                    WaitCanceler::new_port(canceler_result),
                );
            } else {
                // Already signaled: account for it immediately and drop the wait.
                canceler_result.cancel();
                count.next();
            }
        }

        Some(canceler)
    }

    /// Reports POLLIN iff every sync point in the fence has been signaled.
    fn query_events(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
    ) -> Result<FdEvents, Errno> {
        let fence_state = self.get_fence_state();

        for state in fence_state.iter() {
            if state.status == Status::Active {
                return Ok(FdEvents::empty());
            }
        }

        Ok(FdEvents::POLLIN)
    }

    /// Sync files carry no byte stream; reads are rejected.
    fn read(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
        _offset: usize,
        _data: &mut dyn OutputBuffer,
    ) -> Result<usize, Errno> {
        error!(ENODEV)
    }

    /// Sync files carry no byte stream; writes are rejected.
    fn write(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
        _offset: usize,
        _data: &mut dyn InputBuffer,
    ) -> Result<usize, Errno> {
        error!(ENODEV)
    }
}