1use crate::task::{CurrentTask, CurrentTaskAndLocked, register_delayed_release};
6use crate::vfs::{FdNumber, FileHandle, FileReleaser};
7use bitflags::bitflags;
8use fuchsia_rcu::{RcuArc, RcuReadScope};
9use fuchsia_rcu_collections::rcu_array::RcuArray;
10use starnix_sync::{
11 FileOpsCore, LockBefore, LockEqualOrBefore, Locked, Mutex, MutexGuard, ThreadGroupLimits,
12 Unlocked,
13};
14use starnix_syscalls::SyscallResult;
15use starnix_types::ownership::Releasable;
16use starnix_uapi::errors::Errno;
17use starnix_uapi::open_flags::OpenFlags;
18use starnix_uapi::resource_limits::Resource;
19use starnix_uapi::{FD_CLOEXEC, errno, error};
20use static_assertions::const_assert;
21use std::sync::Arc;
22use std::sync::atomic::{AtomicI32, AtomicUsize, Ordering};
23
bitflags! {
    /// Per-descriptor flags stored alongside each fd in the table, as opposed
    /// to the file's own `OpenFlags` (which belong to the open file
    /// description and are shared across dups).
    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct FdFlags: u32 {
        /// Close this fd automatically when the task calls `exec`.
        const CLOEXEC = FD_CLOEXEC;
    }
}
31
32impl std::convert::From<FdFlags> for SyscallResult {
33 fn from(value: FdFlags) -> Self {
34 value.bits().into()
35 }
36}
37
38#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord)]
42pub struct FdTableId(usize);
43
44impl FdTableId {
45 fn new(id: *const FdTableInner) -> Self {
46 Self(id as usize)
47 }
48
49 pub fn raw(&self) -> usize {
50 self.0
51 }
52}
53
// Mask selecting the low pointer bits used to store per-fd flags
// (currently just FD_CLOEXEC in bit 0).
const FLAGS_MASK: usize = 0x1;

/// A single fd slot: a file pointer and its fd flags packed into one atomic
/// word. 0 encodes "empty"; otherwise the word is the `Arc::into_raw` pointer
/// of the file with the flags stored in the low bit(s) covered by
/// `FLAGS_MASK`.
#[derive(Debug, Default)]
struct EncodedEntry {
    value: AtomicUsize,
}

// The pointee must be aligned enough that the flag bit(s) of the pointer are
// always zero.
// NOTE(review): this asserts on the alignment of the *pointer type* (always
// word-aligned) rather than of `FileReleaser` itself, and shifts by the mask
// value rather than by the number of flag bits — confirm the intended check.
const_assert!(std::mem::align_of::<*const FileReleaser>() >= 1 << FLAGS_MASK);
impl EncodedEntry {
    /// Packs `file` and `flags` into a single word: the low bit(s) selected by
    /// `FLAGS_MASK` hold the fd flags, the remaining bits hold the pointer
    /// produced by `Arc::into_raw`. The strong reference is leaked into the
    /// returned value; the caller is responsible for eventually reclaiming it.
    fn encode(file: FileHandle, flags: FdFlags) -> usize {
        let ptr = Arc::into_raw(file) as usize;
        let flags = (flags.bits() as usize) & FLAGS_MASK;
        ptr | flags
    }

    /// Reclaims the strong reference carried by `value` (if non-empty) and
    /// schedules the file for a deferred flush attributed to table `id`.
    ///
    /// # Safety
    ///
    /// `value` must be 0 or a value produced by `encode` whose strong
    /// reference has not already been reclaimed.
    unsafe fn release(id: FdTableId, value: usize) {
        let ptr = Self::decode_ptr(value);
        if !ptr.is_null() {
            // SAFETY: per this function's contract, `ptr` came from
            // `Arc::into_raw` and still owns one strong count.
            let file = unsafe { Arc::from_raw(ptr) };
            // The flush needs task/lock context not available here, so it is
            // deferred via the delayed-release machinery.
            register_delayed_release(FlushedFile(file, id));
        }
    }

    /// Extracts the fd flags stored in the low bits of an encoded value.
    fn decode_flags(value: usize) -> FdFlags {
        FdFlags::from_bits_truncate((value & FLAGS_MASK) as u32)
    }

    /// Extracts the file pointer by masking off the flag bits; the empty
    /// encoding (0) decodes to a null pointer.
    fn decode_ptr(value: usize) -> *const FileReleaser {
        (value & !FLAGS_MASK) as *const _
    }

    /// Creates an occupied slot from `entry`, taking over its file reference.
    fn new(entry: FdTableEntry) -> Self {
        Self { value: AtomicUsize::new(Self::encode(entry.file, entry.flags)) }
    }

    /// Whether this slot currently holds a file (0 encodes "empty").
    fn is_some(&self) -> bool {
        let value = self.value.load(Ordering::Acquire);
        value != 0
    }

    /// Whether this slot is empty.
    fn is_none(&self) -> bool {
        !self.is_some()
    }

    /// The fd flags of this slot, or `None` if the slot is empty.
    fn flags(&self) -> Option<FdFlags> {
        let value = self.value.load(Ordering::Acquire);
        if value == 0 {
            return None;
        }
        Some(Self::decode_flags(value))
    }

    /// Replaces only the flag bits via a CAS loop, leaving the file pointer
    /// unchanged. Panics (via `assert!`) if the slot is empty.
    fn set_flags(&self, flags: FdFlags) {
        loop {
            let old_value = self.value.load(Ordering::Relaxed);
            assert!(old_value != 0);
            let new_value = old_value & !FLAGS_MASK | (flags.bits() as usize) & FLAGS_MASK;
            if self
                .value
                .compare_exchange_weak(old_value, new_value, Ordering::AcqRel, Ordering::Relaxed)
                .is_ok()
            {
                return;
            }
        }
    }

    /// A new strong reference to this slot's file, or `None` if empty.
    fn file(&self) -> Option<FileHandle> {
        self.to_entry().map(|entry| entry.file)
    }

    /// Replaces the file of an occupied slot via a CAS loop, preserving the
    /// flag bits and releasing the previous file. Panics (via `assert!`) if
    /// the slot is empty.
    fn set_file(&self, id: FdTableId, file: FileHandle) {
        let ptr = Arc::into_raw(file) as usize;
        loop {
            let old_value = self.value.load(Ordering::Relaxed);
            assert!(old_value != 0);
            let flags = old_value & FLAGS_MASK;
            let new_value = ptr | flags;
            if self
                .value
                .compare_exchange_weak(old_value, new_value, Ordering::AcqRel, Ordering::Relaxed)
                .is_ok()
            {
                // SAFETY: `old_value` was the published encoding and was just
                // unlinked by the successful CAS; reclaim it exactly once.
                unsafe { Self::release(id, old_value) };
                return;
            }
        }
    }

    /// Decodes this slot into an owned `(file, flags)` entry, or `None` if
    /// empty. The returned entry holds its own strong reference.
    fn to_entry(&self) -> Option<FdTableEntry> {
        let value = self.value.load(Ordering::Acquire);
        if value == 0 {
            return None;
        }
        let flags = Self::decode_flags(value);
        let ptr = Self::decode_ptr(value);
        // SAFETY: `ptr` came from `Arc::into_raw`, so bumping the strong count
        // before `from_raw` gives the returned handle its own reference.
        // NOTE(review): this relies on the loaded value not being fully
        // released concurrently before the increment — presumably guaranteed
        // by the RCU read scope plus delayed release; confirm against callers.
        let file = unsafe {
            Arc::increment_strong_count(ptr);
            Arc::from_raw(ptr)
        };
        Some(FdTableEntry { file, flags })
    }

    /// Stores `entry` into the slot, releasing any previous file. Returns
    /// whether the slot was previously occupied.
    fn set_entry(&self, id: FdTableId, entry: FdTableEntry) -> bool {
        // SAFETY: `encode` transfers the entry's strong reference into `set`.
        unsafe { self.set(id, Self::encode(entry.file, entry.flags)) }
    }

    /// Empties the slot, releasing any previous file. Returns whether the
    /// slot was previously occupied.
    fn clear(&self, id: FdTableId) -> bool {
        // SAFETY: 0 is the empty encoding and carries no reference.
        unsafe { self.set(id, 0) }
    }

    /// Atomically swaps `value` into the slot and releases whatever was
    /// there. Returns whether the slot was previously occupied.
    ///
    /// # Safety
    ///
    /// `value` must be 0 or an encoding whose strong reference is being
    /// transferred into this slot.
    unsafe fn set(&self, id: FdTableId, value: usize) -> bool {
        let old_value = self.value.swap(value, Ordering::AcqRel);
        if old_value != 0 {
            // SAFETY: the swap unlinked `old_value`; its reference is ours to
            // reclaim exactly once.
            unsafe { Self::release(id, old_value) };
            true
        } else {
            false
        }
    }
}
224
225impl Clone for EncodedEntry {
226 fn clone(&self) -> Self {
227 if let Some(entry) = self.to_entry() { Self::new(entry) } else { Self::default() }
228 }
229}
230
impl Drop for EncodedEntry {
    fn drop(&mut self) {
        // Reclaim the strong reference directly, without the delayed-release
        // flush path used by `clear`/`set`.
        // NOTE(review): presumably any slot that still needs a flush is
        // cleared before being dropped (see `FdTableInner::drop`) — confirm.
        let value = self.value.load(Ordering::Acquire);
        let ptr = Self::decode_ptr(value);
        if !ptr.is_null() {
            // SAFETY: a non-null decoded pointer always originates from
            // `Arc::into_raw` in `encode` and still owns one strong count.
            let _file = unsafe { Arc::from_raw(ptr) };
        }
    }
}
241
/// An owned `(file, flags)` pair decoded from a table slot.
#[derive(Debug, Clone)]
struct FdTableEntry {
    // The open file this fd refers to.
    file: FileHandle,

    // Per-descriptor flags (e.g. CLOEXEC), distinct from the file's own
    // open flags.
    flags: FdFlags,
}

/// A file removed from the table identified by `FdTableId`, waiting for its
/// deferred flush.
struct FlushedFile(FileHandle, FdTableId);
254
impl Releasable for FlushedFile {
    type Context<'a> = CurrentTaskAndLocked<'a>;
    /// Performs the deferred flush of the file, attributed to the table it
    /// was removed from, now that task and lock context are available.
    fn release<'a>(self, context: Self::Context<'a>) {
        let (locked, current_task) = context;
        let FlushedFile(file, id) = self;
        file.flush(locked, current_task, id);
    }
}
263
/// A read-only view of the fd slots, valid for the lifetime of the RCU read
/// scope it was obtained from.
struct FdTableView<'a> {
    // The slot slice; `'a` is tied to the `RcuReadScope`.
    slice: &'a [EncodedEntry],
}
275
276impl<'a> FdTableView<'a> {
277 fn len(&self) -> usize {
279 self.slice.len()
280 }
281
282 fn is_some(&self, fd: FdNumber) -> bool {
284 self.slice.get(fd.raw() as usize).map_or(false, |entry| entry.is_some())
285 }
286
287 fn is_none(&self, fd: FdNumber) -> bool {
289 !self.is_some(fd)
290 }
291
292 fn get_file(&self, fd: FdNumber) -> Option<FileHandle> {
294 self.slice.get(fd.raw() as usize).and_then(|entry| entry.file())
295 }
296
297 fn get_entry(&self, fd: FdNumber) -> Option<FdTableEntry> {
299 self.slice.get(fd.raw() as usize).and_then(|entry| entry.to_entry())
300 }
301}
302
/// Write access to an `FdTableInner`. Holding this guard excludes all other
/// writers (readers proceed lock-free via RCU).
struct FdTableWriteGuard<'a> {
    store: &'a FdTableInner,
    // Held for the guard's lifetime to serialize writers.
    _write_guard: MutexGuard<'a, ()>,
}
307
impl<'a> FdTableWriteGuard<'a> {
    /// The cached lowest fd number believed to be available.
    fn next_fd(&self) -> FdNumber {
        self.store.next_fd.get()
    }

    /// Linearly scans `view` for the first unused fd at or above `minfd`.
    fn calculate_lowest_available_fd(&self, view: &FdTableView<'_>, minfd: &FdNumber) -> FdNumber {
        let mut fd: FdNumber = *minfd;
        while view.is_some(fd) {
            fd = FdNumber::from_raw(fd.raw() + 1);
        }
        fd
    }

    /// The lowest available fd that is >= `minfd`. Returns the cached
    /// `next_fd` directly when `minfd` does not exceed it; otherwise scans
    /// from `minfd`.
    fn get_lowest_available_fd(&self, scope: &RcuReadScope, minfd: FdNumber) -> FdNumber {
        if minfd > self.store.next_fd.get() {
            let view = self.store.read(scope);
            return self.calculate_lowest_available_fd(&view, &minfd);
        }
        self.store.next_fd.get()
    }

    /// A new strong reference to the file at `fd`, if any.
    fn get_file(&self, scope: &RcuReadScope, fd: FdNumber) -> Option<FileHandle> {
        self.store.read(scope).get_file(fd)
    }

    /// Stores `entry` at `fd`, growing the slot array if needed.
    ///
    /// Fails with EBADF for negative fds and EMFILE when `fd` is at or above
    /// `rlimit` (RLIMIT_NOFILE). On success, returns whether an existing
    /// entry was replaced (and released). When `fd` consumes the cached
    /// `next_fd`, the cache is advanced past it first.
    fn insert_entry(
        &self,
        scope: &RcuReadScope,
        fd: FdNumber,
        rlimit: u64,
        entry: FdTableEntry,
    ) -> Result<bool, Errno> {
        let raw_fd = fd.raw();
        if raw_fd < 0 {
            return error!(EBADF);
        }
        if raw_fd as u64 >= rlimit {
            return error!(EMFILE);
        }
        let mut view = self.store.read(scope);
        if raw_fd == self.store.next_fd.get().raw() {
            self.store
                .next_fd
                .set(self.calculate_lowest_available_fd(&view, &FdNumber::from_raw(raw_fd + 1)));
        }
        let raw_fd = raw_fd as usize;
        if view.len() <= raw_fd {
            // SAFETY: growth is serialized by this guard's writer lock.
            // NOTE(review): confirm this matches `ensure_at_least`'s contract.
            unsafe { self.store.entries.ensure_at_least(raw_fd + 1) };
            // Re-read so the view covers the newly grown storage.
            view = self.store.read(scope);
        }
        let id = self.store.id();
        Ok(view.slice[raw_fd].set_entry(id, entry))
    }

    /// Clears the slot for `fd`, releasing its file. Returns whether an entry
    /// was actually removed; on removal, lowers the `next_fd` cache if the
    /// removed fd is below it.
    fn remove_entry(&self, scope: &RcuReadScope, fd: &FdNumber) -> bool {
        let raw_fd = fd.raw() as usize;
        let view = self.store.read(scope);
        if raw_fd >= view.len() {
            return false;
        }
        let id = self.store.id();
        let removed = view.slice[raw_fd].clear(id);
        if removed && raw_fd < self.store.next_fd.get().raw() as usize {
            self.store.next_fd.set(*fd);
        }
        removed
    }

    /// Sets the fd flags for `fd`, failing with EBADF when the fd is out of
    /// range or not open.
    fn set_fd_flags(
        &self,
        scope: &RcuReadScope,
        fd: FdNumber,
        flags: FdFlags,
    ) -> Result<(), Errno> {
        let view = self.store.read(scope);
        if view.is_none(fd) {
            return error!(EBADF);
        }
        let raw_fd = fd.raw() as usize;
        view.slice[raw_fd].set_flags(flags);
        Ok(())
    }

    /// Invokes `predicate` for every occupied fd. Entries for which it
    /// returns `false` are removed (and released); flag changes made through
    /// the `&mut FdFlags` are written back. Finally recomputes the `next_fd`
    /// cache from scratch.
    fn retain<F>(&self, scope: &RcuReadScope, mut predicate: F)
    where
        F: FnMut(FdNumber, &mut FdFlags) -> bool,
    {
        let id = self.store.id();
        let view = self.store.read(scope);
        for (index, encoded_entry) in view.slice.iter().enumerate() {
            let fd = FdNumber::from_raw(index as i32);
            if let Some(flags) = encoded_entry.flags() {
                let mut modified_flags = flags;
                if !predicate(fd, &mut modified_flags) {
                    encoded_entry.clear(id);
                } else if modified_flags != flags {
                    encoded_entry.set_flags(modified_flags);
                }
            }
        }
        self.store.next_fd.set(self.calculate_lowest_available_fd(&view, &FdNumber::from_raw(0)));
    }

    /// Offers every open file to `predicate`; when it returns a replacement,
    /// the slot's file is swapped in place (flags preserved) and the old file
    /// is released.
    fn remap<F>(&self, scope: &RcuReadScope, predicate: F)
    where
        F: Fn(&FileHandle) -> Option<FileHandle>,
    {
        let id = self.store.id();
        let view = self.store.read(scope);
        for encoded_entry in view.slice.iter() {
            if let Some(file) = encoded_entry.file() {
                if let Some(replacement_file) = predicate(&file) {
                    encoded_entry.set_file(id, replacement_file);
                }
            }
        }
    }
}
451
452#[derive(Debug, Default)]
457struct AtomicFdNumber {
458 value: AtomicI32,
460}
461
462impl AtomicFdNumber {
463 fn get(&self) -> FdNumber {
467 FdNumber::from_raw(self.value.load(Ordering::Relaxed))
468 }
469
470 fn set(&self, value: FdNumber) {
474 self.value.store(value.raw(), Ordering::Relaxed);
475 }
476}
477
478impl Clone for AtomicFdNumber {
479 fn clone(&self) -> Self {
480 Self { value: AtomicI32::new(self.value.load(Ordering::Relaxed)) }
481 }
482}
483
/// The state backing one fd table.
#[derive(Debug)]
struct FdTableInner {
    /// The fd slots, indexed by raw fd number; grown on demand by writers.
    entries: RcuArray<EncodedEntry>,

    /// Cache of the lowest fd number believed to be unused; maintained by
    /// `FdTableWriteGuard`.
    next_fd: AtomicFdNumber,

    /// Serializes writers. The entries themselves are updated atomically, so
    /// readers never take this lock.
    writer_queue: Mutex<()>,
}
500
501impl Default for FdTableInner {
502 fn default() -> Self {
503 FdTableInner {
504 entries: Default::default(),
505 next_fd: AtomicFdNumber::default(),
506 writer_queue: Mutex::new(()),
507 }
508 }
509}
510
impl Clone for FdTableInner {
    fn clone(&self) -> Self {
        // Hold the writer lock so no writer mutates the entries while they
        // are being copied; the copy gets its own, unheld writer queue.
        let _guard = self.writer_queue.lock();
        Self {
            entries: self.entries.clone(),
            next_fd: self.next_fd.clone(),
            writer_queue: Mutex::new(()),
        }
    }
}
521
impl Drop for FdTableInner {
    fn drop(&mut self) {
        // Clear every slot through `clear` so each remaining file goes through
        // the delayed-release (flush) path, rather than being dropped silently
        // by `EncodedEntry::drop`.
        let id = self.id();
        let scope = RcuReadScope::new();
        let view = self.read(&scope);
        for entry in view.slice.iter() {
            entry.clear(id);
        }
    }
}
532
impl FdTableInner {
    /// The table's identity: the address of this inner object.
    fn id(&self) -> FdTableId {
        FdTableId::new(self as *const Self)
    }

    /// Deep-copies the table into a new, independent inner object.
    fn unshare(&self) -> Arc<Self> {
        Arc::new(self.clone())
    }

    /// A read-only view over the entries, valid for the given RCU scope.
    fn read<'a>(&self, scope: &'a RcuReadScope) -> FdTableView<'a> {
        let slice = self.entries.as_slice(scope);
        FdTableView { slice }
    }

    /// Acquires write access, blocking until other writers finish.
    fn write(&self) -> FdTableWriteGuard<'_> {
        FdTableWriteGuard { store: self, _write_guard: self.writer_queue.lock() }
    }
}
556
/// A task's file-descriptor table. Cloning an `FdTable` shares the underlying
/// table; `fork`/`unshare` create an independent deep copy.
#[derive(Debug, Default)]
pub struct FdTable {
    // RCU-swappable pointer to the shared table state.
    inner: RcuArc<FdTableInner>,
}

/// How `FdTable::duplicate` chooses the new fd number.
pub enum TargetFdNumber {
    /// Use the lowest available fd (dup-style).
    Default,

    /// Use exactly this fd, replacing whatever it currently refers to
    /// (dup2-style).
    Specific(FdNumber),

    /// Use the lowest available fd at or above the given one
    /// (F_DUPFD-style).
    Minimum(FdNumber),
}
575
impl FdTable {
    /// A stable identifier for the current underlying table. Changes when the
    /// table is unshared.
    pub fn id(&self) -> FdTableId {
        self.inner.read().id()
    }

    /// Creates an independent deep copy of this table, as for `fork()`: the
    /// copy starts with the same files and flags but later changes are not
    /// shared.
    pub fn fork(&self) -> FdTable {
        let unshared = self.inner.read().unshare();
        FdTable { inner: RcuArc::new(unshared) }
    }

    /// Replaces the shared underlying table with a private deep copy, so that
    /// tasks previously sharing this table no longer observe our changes.
    pub fn unshare(&self) {
        let unshared = self.inner.read().unshare();
        self.inner.update(unshared);
    }

    /// Drops the underlying table, releasing every remaining file (their
    /// flushes are scheduled via the delayed-release path in
    /// `FdTableInner::drop`).
    pub fn release(&self) {
        self.inner.update(Default::default());
    }

    /// Removes every fd marked `CLOEXEC`, as required on `exec`.
    pub fn exec(&self, locked: &mut Locked<Unlocked>, current_task: &CurrentTask) {
        self.retain(locked, current_task, |_fd, flags| !flags.contains(FdFlags::CLOEXEC));
    }

    /// Inserts `file` at exactly `fd` with empty fd flags, silently replacing
    /// any existing entry at that number.
    ///
    /// # Errors
    ///
    /// EBADF for a negative fd; EMFILE if `fd` is at or above RLIMIT_NOFILE.
    pub fn insert<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        fd: FdNumber,
        file: FileHandle,
    ) -> Result<(), Errno>
    where
        L: LockBefore<ThreadGroupLimits>,
    {
        let flags = FdFlags::empty();
        let rlimit = current_task.thread_group().get_rlimit(locked, Resource::NOFILE);
        let inner = self.inner.read();
        let guard = inner.write();
        guard.insert_entry(&inner.scope, fd, rlimit, FdTableEntry { file, flags })?;
        Ok(())
    }

    /// Adds `file` at the lowest available fd with the given fd flags and
    /// returns that fd.
    ///
    /// # Errors
    ///
    /// EMFILE if the chosen fd is at or above RLIMIT_NOFILE.
    pub fn add<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        file: FileHandle,
        flags: FdFlags,
    ) -> Result<FdNumber, Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        let locked = locked.cast_locked::<FileOpsCore>();
        let rlimit = current_task.thread_group().get_rlimit(locked, Resource::NOFILE);
        let inner = self.inner.read();
        let guard = inner.write();
        let fd = guard.next_fd();
        guard.insert_entry(&inner.scope, fd, rlimit, FdTableEntry { file, flags })?;
        Ok(fd)
    }

    /// Duplicates `oldfd` onto a new fd chosen according to `target` and
    /// returns the new fd. For `Specific` (dup2-style) the existing entry at
    /// the target is closed first.
    ///
    /// # Errors
    ///
    /// EBADF if `oldfd` is not open, or if a `Specific` target is at or above
    /// RLIMIT_NOFILE; EMFILE if the chosen fd exceeds the limit.
    pub fn duplicate<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        oldfd: FdNumber,
        target: TargetFdNumber,
        flags: FdFlags,
    ) -> Result<FdNumber, Errno>
    where
        L: LockBefore<ThreadGroupLimits>,
    {
        let rlimit = current_task.thread_group().get_rlimit(locked, Resource::NOFILE);
        let inner = self.inner.read();
        let guard = inner.write();
        let file = guard.get_file(&inner.scope, oldfd).ok_or_else(|| errno!(EBADF))?;

        let fd = match target {
            TargetFdNumber::Specific(fd) => {
                if fd.raw() as u64 >= rlimit {
                    // Per dup2/dup3 semantics, an out-of-range newfd is EBADF
                    // (not EMFILE).
                    return error!(EBADF);
                }
                guard.remove_entry(&inner.scope, &fd);
                fd
            }
            TargetFdNumber::Minimum(fd) => guard.get_lowest_available_fd(&inner.scope, fd),
            TargetFdNumber::Default => {
                guard.get_lowest_available_fd(&inner.scope, FdNumber::from_raw(0))
            }
        };
        let existing_entry =
            guard.insert_entry(&inner.scope, fd, rlimit, FdTableEntry { file, flags })?;
        // The target slot was freed (or chosen as free) above, and we still
        // hold the writer guard, so nothing can have filled it.
        assert!(!existing_entry);
        Ok(fd)
    }

    /// Returns the file at `fd`, including files opened with `O_PATH`.
    pub fn get_allowing_opath(&self, fd: FdNumber) -> Result<FileHandle, Errno> {
        self.get_allowing_opath_with_flags(fd).map(|(file, _flags)| file)
    }

    /// Returns the file at `fd` together with its fd flags, including files
    /// opened with `O_PATH`. Fails with EBADF if `fd` is not open.
    pub fn get_allowing_opath_with_flags(
        &self,
        fd: FdNumber,
    ) -> Result<(FileHandle, FdFlags), Errno> {
        let inner = self.inner.read();
        let view = inner.read(&inner.scope);
        view.get_entry(fd).map(|entry| (entry.file, entry.flags)).ok_or_else(|| errno!(EBADF))
    }

    /// Returns the file at `fd`, treating `O_PATH` files as not present
    /// (EBADF), which is what most syscalls require.
    pub fn get(&self, fd: FdNumber) -> Result<FileHandle, Errno> {
        let file = self.get_allowing_opath(fd)?;
        if file.flags().contains(OpenFlags::PATH) {
            return error!(EBADF);
        }
        Ok(file)
    }

    /// Removes `fd` from the table, scheduling the file's flush. Fails with
    /// EBADF if `fd` is not open.
    pub fn close(&self, fd: FdNumber) -> Result<(), Errno> {
        let inner = self.inner.read();
        let guard = inner.write();
        if guard.remove_entry(&inner.scope, &fd) { Ok(()) } else { error!(EBADF) }
    }

    /// Returns the fd flags for `fd`, including `O_PATH` files.
    pub fn get_fd_flags_allowing_opath(&self, fd: FdNumber) -> Result<FdFlags, Errno> {
        self.get_allowing_opath_with_flags(fd).map(|(_file, flags)| flags)
    }

    /// Sets the fd flags for `fd`, rejecting `O_PATH` files and unopened fds
    /// with EBADF.
    pub fn set_fd_flags(&self, fd: FdNumber, flags: FdFlags) -> Result<(), Errno> {
        let inner = self.inner.read();
        let guard = inner.write();
        let file = guard.get_file(&inner.scope, fd).ok_or_else(|| errno!(EBADF))?;
        if file.flags().contains(OpenFlags::PATH) {
            return error!(EBADF);
        }
        guard.set_fd_flags(&inner.scope, fd, flags)
    }

    /// Sets the fd flags for `fd`, including for `O_PATH` files. Fails with
    /// EBADF if `fd` is not open.
    pub fn set_fd_flags_allowing_opath(&self, fd: FdNumber, flags: FdFlags) -> Result<(), Errno> {
        let inner = self.inner.read();
        let guard = inner.write();
        guard.set_fd_flags(&inner.scope, fd, flags)
    }

    /// Keeps only the fds for which `predicate` returns true; the predicate
    /// may also modify each fd's flags in place. See
    /// `FdTableWriteGuard::retain`.
    pub fn retain<L, F>(&self, _locked: &mut Locked<L>, _current_task: &CurrentTask, predicate: F)
    where
        L: LockEqualOrBefore<FileOpsCore>,
        F: Fn(FdNumber, &mut FdFlags) -> bool,
    {
        let inner = self.inner.read();
        let guard = inner.write();
        guard.retain(&inner.scope, predicate);
    }

    /// The numbers of all currently open fds, in ascending order.
    pub fn get_all_fds(&self) -> Vec<FdNumber> {
        let inner = self.inner.read();
        let view = inner.read(&inner.scope);
        view.slice
            .iter()
            .enumerate()
            .filter_map(|(index, encoded_entry)| {
                if encoded_entry.is_none() { None } else { Some(FdNumber::from_raw(index as i32)) }
            })
            .collect()
    }

    /// Replaces files in place: every open file for which `predicate` returns
    /// a replacement is swapped (fd flags preserved). See
    /// `FdTableWriteGuard::remap`.
    pub fn remap<L, F: Fn(&FileHandle) -> Option<FileHandle>>(
        &self,
        _locked: &mut Locked<L>,
        _current_task: &CurrentTask,
        predicate: F,
    ) where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        let inner = self.inner.read();
        let guard = inner.write();
        guard.remap(&inner.scope, predicate);
    }
}
812
813impl Clone for FdTable {
814 fn clone(&self) -> Self {
815 FdTable { inner: self.inner.clone() }
816 }
817}
818
#[cfg(test)]
mod test {
    use super::*;
    use crate::fs::fuchsia::SyslogFile;
    use crate::testing::*;

    /// Adds `file` to `files` with empty fd flags.
    // NOTE: this file previously contained mojibake (`¤t_task`) where
    // `&current_task` was intended; restored throughout this module.
    fn add(
        locked: &mut Locked<Unlocked>,
        current_task: &CurrentTask,
        files: &FdTable,
        file: FileHandle,
    ) -> Result<FdNumber, Errno> {
        files.add(locked, current_task, file, FdFlags::empty())
    }

    /// Fds are handed out lowest-first and resolve back to the same file.
    #[::fuchsia::test]
    async fn test_fd_table_install() {
        spawn_kernel_and_run(async |locked, current_task| {
            let files = FdTable::default();
            let file = SyslogFile::new_file(locked, &current_task);

            let fd0 = add(locked, &current_task, &files, file.clone()).unwrap();
            assert_eq!(fd0.raw(), 0);
            let fd1 = add(locked, &current_task, &files, file.clone()).unwrap();
            assert_eq!(fd1.raw(), 1);

            assert!(Arc::ptr_eq(&files.get(fd0).unwrap(), &file));
            assert!(Arc::ptr_eq(&files.get(fd1).unwrap(), &file));
            // An fd that was never assigned is EBADF.
            assert_eq!(files.get(FdNumber::from_raw(fd1.raw() + 1)).map(|_| ()), error!(EBADF));

            files.release();
        })
        .await;
    }

    /// `fork` deep-copies the table: same files initially, but flag changes
    /// after the fork are not shared.
    #[::fuchsia::test]
    async fn test_fd_table_fork() {
        spawn_kernel_and_run(async |locked, current_task| {
            let files = FdTable::default();
            let file = SyslogFile::new_file(locked, &current_task);

            let fd0 = add(locked, &current_task, &files, file.clone()).unwrap();
            let fd1 = add(locked, &current_task, &files, file).unwrap();
            let fd2 = FdNumber::from_raw(2);

            let forked = files.fork();

            assert_eq!(
                Arc::as_ptr(&files.get(fd0).unwrap()),
                Arc::as_ptr(&forked.get(fd0).unwrap())
            );
            assert_eq!(
                Arc::as_ptr(&files.get(fd1).unwrap()),
                Arc::as_ptr(&forked.get(fd1).unwrap())
            );
            assert!(files.get(fd2).is_err());
            assert!(forked.get(fd2).is_err());

            files.set_fd_flags_allowing_opath(fd0, FdFlags::CLOEXEC).unwrap();
            assert_eq!(FdFlags::CLOEXEC, files.get_fd_flags_allowing_opath(fd0).unwrap());
            assert_ne!(FdFlags::CLOEXEC, forked.get_fd_flags_allowing_opath(fd0).unwrap());

            forked.release();
            files.release();
        })
        .await;
    }

    /// `exec` drops only the fds marked CLOEXEC.
    #[::fuchsia::test]
    async fn test_fd_table_exec() {
        spawn_kernel_and_run(async |locked, current_task| {
            let files = FdTable::default();
            let file = SyslogFile::new_file(locked, &current_task);

            let fd0 = add(locked, &current_task, &files, file.clone()).unwrap();
            let fd1 = add(locked, &current_task, &files, file).unwrap();

            files.set_fd_flags_allowing_opath(fd0, FdFlags::CLOEXEC).unwrap();

            assert!(files.get(fd0).is_ok());
            assert!(files.get(fd1).is_ok());

            files.exec(locked, &current_task);

            assert!(files.get(fd0).is_err());
            assert!(files.get(fd1).is_ok());

            files.release();
        })
        .await;
    }

    /// Closed fd numbers are reused: the lowest free fd is handed out next.
    #[::fuchsia::test]
    async fn test_fd_table_pack_values() {
        spawn_kernel_and_run(async |locked, current_task| {
            let files = FdTable::default();
            let file = SyslogFile::new_file(locked, &current_task);

            let fd0 = add(locked, &current_task, &files, file.clone()).unwrap();
            let fd1 = add(locked, &current_task, &files, file.clone()).unwrap();
            assert_eq!(fd0.raw(), 0);
            assert_eq!(fd1.raw(), 1);

            assert!(files.close(fd0).is_ok());
            // Double-close is EBADF.
            assert!(files.close(fd0).is_err());
            assert!(files.get(fd0).is_err());

            let another_fd = add(locked, &current_task, &files, file).unwrap();
            assert_eq!(another_fd.raw(), 0);

            files.release();
        })
        .await;
    }
}