1use crate::mm::{
6 MemoryAccessorExt, NumberOfElementsRead, RemoteMemoryManager, TaskMemoryAccessor,
7 UNIFIED_ASPACES_ENABLED, read_to_array, read_to_object_as_bytes, read_to_vec,
8};
9use crate::task::{CurrentTask, Task};
10use smallvec::{SmallVec, smallvec};
11use starnix_types::user_buffer::{UserBuffer, UserBuffers};
12use starnix_uapi::errors::{ENOTSUP, Errno};
13use starnix_uapi::user_address::UserAddress;
14use starnix_uapi::{errno, error};
15use std::mem::MaybeUninit;
16use std::ops::{Deref, DerefMut};
17use zerocopy::FromBytes;
18
/// Callback used by [`OutputBuffer::write_each`]: it is handed a span of
/// uninitialized bytes to fill and returns the number of bytes it wrote.
pub type OutputBufferCallback<'a> = dyn FnMut(&mut [MaybeUninit<u8>]) -> Result<usize, Errno> + 'a;
22
/// Reinterprets an initialized byte slice as a slice of `MaybeUninit<u8>`.
///
/// This direction is always sound: `MaybeUninit<u8>` has the same layout as
/// `u8`, and presenting initialized bytes as possibly-uninitialized ones gives
/// safe code no way to misuse them (only the reverse cast would be unsound).
fn slice_to_maybe_uninit(buffer: &[u8]) -> &[MaybeUninit<u8>] {
    // SAFETY: `u8` and `MaybeUninit<u8>` have identical size and alignment;
    // the resulting slice inherits `buffer`'s length and lifetime.
    unsafe { &*(buffer as *const [u8] as *const [MaybeUninit<u8>]) }
}
27
/// Conversion from a [`UserBuffer`] into one of the iovec-like FFI structs, so
/// generic code can build whichever representation a particular API expects.
pub trait Iovec: Sized {
    /// Builds the iovec describing `buffer`'s address and length.
    fn create(buffer: &UserBuffer) -> Self;
}

impl Iovec for syncio::zxio::iovec {
    fn create(buffer: &UserBuffer) -> Self {
        Self { iov_base: buffer.address.ptr() as *mut starnix_uapi::c_void, iov_len: buffer.length }
    }
}

impl Iovec for syncio::zxio::zx_iovec {
    fn create(buffer: &UserBuffer) -> Self {
        Self { buffer: buffer.address.ptr() as *mut starnix_uapi::c_void, capacity: buffer.length }
    }
}

impl Iovec for zx::sys::zx_iovec_t {
    fn create(buffer: &UserBuffer) -> Self {
        Self { buffer: buffer.address.ptr() as *const u8, capacity: buffer.length }
    }
}
49
/// Inline capacity of the `SmallVec` inside `IovecsRef`: buffers with more
/// segments than this spill their iovecs to the heap.
const IOVECS_IN_HEAP_THRESHOLD: usize = 5;
51
/// Holds the iovec representations of a [`Buffer`]'s segments.
///
/// The lifetime parameter ties the raw-pointer-carrying iovecs to the buffer
/// they were derived from so they cannot outlive it.
pub struct IovecsRef<'a, I: Sized> {
    // Inline storage for up to IOVECS_IN_HEAP_THRESHOLD entries; larger
    // segment counts allocate.
    iovecs: SmallVec<[I; IOVECS_IN_HEAP_THRESHOLD]>,
    _marker: std::marker::PhantomData<&'a I>,
}

impl<'a, I: Iovec> IovecsRef<'a, I> {
    /// Collects every segment reported by `buf.peek_each_segment` into iovec
    /// form, pre-sizing the vector from `segments_count`.
    fn new<B: Buffer + ?Sized>(buf: &'a mut B) -> Result<Self, Errno> {
        let mut iovecs = SmallVec::with_capacity(buf.segments_count()?);
        buf.peek_each_segment(&mut |buffer| iovecs.push(I::create(buffer)))?;
        Ok(IovecsRef { iovecs, _marker: Default::default() })
    }
}

impl<I> Deref for IovecsRef<'_, I> {
    type Target = [I];
    fn deref(&self) -> &Self::Target {
        &self.iovecs
    }
}

impl<I> DerefMut for IovecsRef<'_, I> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.iovecs
    }
}
83
/// Callback used by [`Buffer::peek_each_segment`]: invoked once per segment.
pub type PeekBufferSegmentsCallback<'a> = dyn FnMut(&UserBuffer) + 'a;
85
/// Behavior shared by input and output buffers: enumerating the memory
/// segments they are made of, without consuming any data.
pub trait Buffer: std::fmt::Debug {
    /// Returns the number of segments that `peek_each_segment` will report.
    fn segments_count(&self) -> Result<usize, Errno>;

    /// Calls `callback` once per segment; no data is consumed.
    fn peek_each_segment(
        &mut self,
        callback: &mut PeekBufferSegmentsCallback<'_>,
    ) -> Result<(), Errno>;

    /// Collects every segment into a vector of zxio iovecs.
    fn peek_all_segments_as_iovecs(&mut self) -> Result<IovecsRef<'_, syncio::zxio::iovec>, Errno> {
        IovecsRef::new(self)
    }
}
117
118pub fn with_iovec_segments<B: Buffer + ?Sized, I: Iovec, T>(
128 data: &mut B,
129 f: impl FnOnce(&mut [I]) -> Result<T, Errno>,
130) -> Option<Result<T, Errno>> {
131 if !UNIFIED_ASPACES_ENABLED {
132 return None;
133 }
134
135 match IovecsRef::new(data) {
136 Ok(mut o) => Some(f(&mut o)),
137 Err(e) => {
138 if e.code == ENOTSUP {
139 None
140 } else {
141 Some(Err(e))
142 }
143 }
144 }
145}
146
/// A sink that data can be written into, e.g. to report syscall results.
pub trait OutputBuffer: Buffer {
    /// Repeatedly hands `callback` uninitialized memory to fill; the callback
    /// returns how many bytes it wrote. Returns the total written.
    fn write_each(&mut self, callback: &mut OutputBufferCallback<'_>) -> Result<usize, Errno>;

    /// Number of bytes that can still be written.
    fn available(&self) -> usize;

    /// Number of bytes written so far.
    fn bytes_written(&self) -> usize;

    /// Fills the remaining space with zeros; returns how many bytes were zeroed.
    fn zero(&mut self) -> Result<usize, Errno>;

    /// Marks `length` bytes as written without going through `write_each`.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that the next `length` bytes of the underlying
    /// storage have actually been initialized (e.g. by writing through the
    /// iovecs from `peek_all_segments_as_iovecs`): implementations may expose
    /// them as initialized data afterwards.
    unsafe fn advance(&mut self, length: usize) -> Result<(), Errno>;

    /// Copies as much of `buffer` as fits; returns the number of bytes copied.
    fn write(&mut self, buffer: &[u8]) -> Result<usize, Errno> {
        let mut buffer = slice_to_maybe_uninit(buffer);

        self.write_each(&mut move |data| {
            let size = std::cmp::min(buffer.len(), data.len());
            let (to_clone, remaining) = buffer.split_at(size);
            data[0..size].clone_from_slice(to_clone);
            // Keep only the part not yet written for the next callback round.
            buffer = remaining;
            Ok(size)
        })
    }

    /// Copies all of `buffer`, failing with EINVAL if it does not fit.
    fn write_all(&mut self, buffer: &[u8]) -> Result<usize, Errno> {
        let size = self.write(buffer)?;
        if size != buffer.len() { error!(EINVAL) } else { Ok(size) }
    }

    /// Transfers data from `input` into this buffer until either side runs
    /// out; returns the number of bytes moved.
    fn write_buffer(&mut self, input: &mut dyn InputBuffer) -> Result<usize, Errno> {
        self.write_each(&mut move |data| {
            let size = std::cmp::min(data.len(), input.available());
            input.read_exact(&mut data[0..size])
        })
    }
}
212
/// Callback used by [`InputBuffer::peek_each`]: receives readable data and
/// returns the number of bytes it actually consumed.
pub type InputBufferCallback<'a> = dyn FnMut(&[u8]) -> Result<usize, Errno> + 'a;
217
/// A source that data can be read from, e.g. to obtain syscall arguments.
pub trait InputBuffer: Buffer {
    /// Hands `callback` chunks of readable data without consuming them; the
    /// callback returns how many bytes it used. Returns the total reported.
    fn peek_each(&mut self, callback: &mut InputBufferCallback<'_>) -> Result<usize, Errno>;

    /// Number of bytes that can still be read.
    fn available(&self) -> usize;

    /// Number of bytes consumed so far.
    fn bytes_read(&self) -> usize;

    /// Consumes all remaining data; returns how many bytes were discarded.
    fn drain(&mut self) -> usize;

    /// Consumes `length` bytes without returning them.
    fn advance(&mut self, length: usize) -> Result<(), Errno>;

    /// Like `peek_each`, but consumes whatever the callback reported reading.
    fn read_each(&mut self, callback: &mut InputBufferCallback<'_>) -> Result<usize, Errno> {
        let length = self.peek_each(callback)?;
        self.advance(length)?;
        Ok(length)
    }

    /// Reads and consumes all remaining data.
    fn read_all(&mut self) -> Result<Vec<u8>, Errno> {
        let result = self.peek_all()?;
        let drain_result = self.drain();
        // Peeking and draining must agree on how much data was pending.
        assert!(result.len() == drain_result);
        Ok(result)
    }

    /// Returns all remaining data without consuming it.
    fn peek_all(&mut self) -> Result<Vec<u8>, Errno> {
        // SAFETY: `peek` initializes every byte it reports having copied.
        unsafe {
            read_to_vec::<u8, _>(self.available(), |buf| self.peek(buf).map(NumberOfElementsRead))
        }
    }

    /// Copies up to `buffer.len()` bytes into `buffer` without consuming them;
    /// returns the number of bytes copied.
    fn peek(&mut self, buffer: &mut [MaybeUninit<u8>]) -> Result<usize, Errno> {
        let mut index = 0;
        self.peek_each(&mut move |data| {
            let data = slice_to_maybe_uninit(data);
            let size = std::cmp::min(buffer.len() - index, data.len());
            buffer[index..index + size].clone_from_slice(&data[..size]);
            index += size;
            Ok(size)
        })
    }

    /// Copies up to `buffer.len()` bytes into `buffer`, consuming them.
    fn read(&mut self, buffer: &mut [MaybeUninit<u8>]) -> Result<usize, Errno> {
        let length = self.peek(buffer)?;
        self.advance(length)?;
        Ok(length)
    }

    /// Fills `buffer` completely or fails with EINVAL (in which case some
    /// bytes may still have been consumed).
    fn read_exact(&mut self, buffer: &mut [MaybeUninit<u8>]) -> Result<usize, Errno> {
        let size = self.read(buffer)?;
        if size != buffer.len() { error!(EINVAL) } else { Ok(size) }
    }
}
306
/// Convenience extensions on top of [`InputBuffer`].
pub trait InputBufferExt: InputBuffer {
    /// Reads exactly `len` bytes into a new vector; EINVAL if fewer remain.
    fn read_to_vec_exact(&mut self, len: usize) -> Result<Vec<u8>, Errno> {
        // SAFETY: `read_exact` either fails or initializes all `len` bytes.
        unsafe { read_to_vec::<u8, _>(len, |buf| self.read_exact(buf).map(NumberOfElementsRead)) }
    }

    /// Reads up to `limit` bytes into a new vector.
    fn read_to_vec_limited(&mut self, limit: usize) -> Result<Vec<u8>, Errno> {
        // SAFETY: `read` initializes every byte it reports as read.
        unsafe { read_to_vec::<u8, _>(limit, |buf| self.read(buf).map(NumberOfElementsRead)) }
    }

    /// Reads exactly `N` bytes into a fixed-size array.
    fn read_to_array<const N: usize>(&mut self) -> Result<[u8; N], Errno> {
        // SAFETY: `read_exact` fills the whole array or fails.
        unsafe {
            read_to_array::<_, _, N>(|buf| {
                self.read_exact(buf).map(|bytes_read| debug_assert_eq!(bytes_read, buf.len()))
            })
        }
    }

    /// Reads a `T` from the next `size_of::<T>()` bytes; EINVAL on short read.
    fn read_to_object<T: FromBytes>(&mut self) -> Result<T, Errno> {
        // SAFETY: the closure only succeeds once the object's backing bytes
        // have been fully read.
        unsafe {
            read_to_object_as_bytes(|buf| {
                if self.read(buf)? != buf.len() { error!(EINVAL) } else { Ok(()) }
            })
        }
    }
}

// Blanket impls so the extension methods are available both on trait objects
// and on every concrete `InputBuffer`.
impl InputBufferExt for dyn InputBuffer + '_ {}
impl<T: InputBuffer> InputBufferExt for T {}
351
/// An [`OutputBuffer`] that writes through a memory accessor into a user's
/// scatter/gather list of [`UserBuffer`]s.
pub struct UserBuffersOutputBuffer<'a, M> {
    mm: &'a M,
    // Stored in reverse order so that `pop()` yields segments in their
    // original order (see `new_inner`).
    buffers: UserBuffers,
    available: usize,
    bytes_written: usize,
}

// Manual impl: `mm` is omitted because `M` is not required to be `Debug`.
impl<'a, M> std::fmt::Debug for UserBuffersOutputBuffer<'a, M> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("UserBuffersOutputBuffer")
            .field("buffers", &self.buffers)
            .field("available", &self.available)
            .field("bytes_written", &self.bytes_written)
            .finish()
    }
}
369
impl<'a, M: TaskMemoryAccessor> UserBuffersOutputBuffer<'a, M> {
    /// Clamps `buffers` to the accessor's addressable range and the maximum
    /// read/write size; `available` is the resulting total writable count.
    fn new_inner(mm: &'a M, mut buffers: UserBuffers) -> Result<Self, Errno> {
        let available = UserBuffer::cap_buffers_to_max_rw_count(
            mm.maximum_valid_address().ok_or_else(|| errno!(EINVAL))?,
            &mut buffers,
        )?;
        // Reverse so the cheap `pop()` walks segments in their original order.
        buffers.reverse();
        Ok(Self { mm, buffers, available, bytes_written: 0 })
    }

    /// Drives a write: for each segment, asks `callback` for up to
    /// `buffer.length` bytes and copies them into user memory. Stops early
    /// when the callback supplies fewer bytes than the segment could hold.
    fn write_each_inner<B: AsRef<[u8]>, F: FnMut(usize) -> Result<B, Errno>>(
        &mut self,
        mut callback: F,
    ) -> Result<usize, Errno> {
        let mut bytes_written = 0;
        while let Some(mut buffer) = self.buffers.pop() {
            // Null segments are placeholders and are skipped entirely.
            if buffer.is_null() {
                continue;
            }

            let bytes = callback(buffer.length)?;
            let bytes = bytes.as_ref();

            // NOTE(review): the returned total uses `write_memory`'s result
            // while the struct counters use `bytes.len()`; these diverge if a
            // partial write is possible — confirm `write_memory` is
            // all-or-nothing.
            bytes_written += self.mm.write_memory(buffer.address, bytes)?;
            let bytes_len = bytes.len();
            buffer.advance(bytes_len)?;
            self.available -= bytes_len;
            self.bytes_written += bytes_len;
            // A partially filled segment means the callback ran out of data.
            if !buffer.is_empty() {
                self.buffers.push(buffer);
                break;
            }
        }
        Ok(bytes_written)
    }
}
407
impl<'a> UserBuffersOutputBuffer<'a, CurrentTask> {
    /// Writes into the current task's own (unified) address space.
    pub fn unified_new(task: &'a CurrentTask, buffers: UserBuffers) -> Result<Self, Errno> {
        Self::new_inner(task, buffers)
    }

    /// Convenience constructor for a single `[address, address + length)` buffer.
    pub fn unified_new_at(
        task: &'a CurrentTask,
        address: UserAddress,
        length: usize,
    ) -> Result<Self, Errno> {
        Self::unified_new(task, smallvec![UserBuffer { address, length }])
    }
}

impl<'a> UserBuffersOutputBuffer<'a, Task> {
    /// Writes into `task`'s memory.
    pub fn syscall_new(task: &'a Task, buffers: UserBuffers) -> Result<Self, Errno> {
        Self::new_inner(task, buffers)
    }
}

impl<'a> UserBuffersOutputBuffer<'a, RemoteMemoryManager> {
    /// Writes through a remote memory manager.
    pub fn remote_new(mm: &'a RemoteMemoryManager, buffers: UserBuffers) -> Result<Self, Errno> {
        Self::new_inner(mm, buffers)
    }
}
433
434impl<'a, M: TaskMemoryAccessor> Buffer for UserBuffersOutputBuffer<'a, M> {
435 fn segments_count(&self) -> Result<usize, Errno> {
436 Ok(self.buffers.len())
437 }
438
439 fn peek_each_segment(
440 &mut self,
441 callback: &mut PeekBufferSegmentsCallback<'_>,
442 ) -> Result<(), Errno> {
443 for buffer in self.buffers.iter().rev() {
447 if buffer.is_null() {
448 continue;
449 }
450 callback(buffer)
451 }
452
453 Ok(())
454 }
455}
456
impl<'a, M: TaskMemoryAccessor> OutputBuffer for UserBuffersOutputBuffer<'a, M> {
    /// Specialized override: copies straight from `bytes`, splitting it across
    /// segments without the temporary allocation `write_each` needs.
    fn write(&mut self, mut bytes: &[u8]) -> Result<usize, Errno> {
        self.write_each_inner(|buflen| {
            let bytes_len = std::cmp::min(bytes.len(), buflen);
            let (to_write, remaining) = bytes.split_at(bytes_len);
            bytes = remaining;
            Ok(to_write)
        })
    }

    fn write_each(&mut self, callback: &mut OutputBufferCallback<'_>) -> Result<usize, Errno> {
        self.write_each_inner(|buflen| {
            // Collect the callback's output into a temporary vector sized to
            // the current segment; `write_each_inner` then copies it out.
            // SAFETY: the callback reports how many bytes it initialized, and
            // over-reporting is rejected with EINVAL before `set_len`-style use.
            unsafe {
                read_to_vec::<u8, _>(buflen, |buf| {
                    let result = callback(buf)?;
                    if result > buflen {
                        return error!(EINVAL);
                    }
                    Ok(NumberOfElementsRead(result))
                })
            }
        })
    }

    fn available(&self) -> usize {
        self.available
    }

    fn bytes_written(&self) -> usize {
        self.bytes_written
    }

    /// Zeroes the remaining segments via the memory accessor.
    fn zero(&mut self) -> Result<usize, Errno> {
        let mut bytes_written = 0;
        while let Some(mut buffer) = self.buffers.pop() {
            if buffer.is_null() {
                continue;
            }

            let count = self.mm.zero(buffer.address, buffer.length)?;
            buffer.advance(count)?;
            bytes_written += count;

            self.available -= count;
            self.bytes_written += count;

            // A partially zeroed segment means the accessor could not go
            // further; keep the remainder for later and stop.
            if !buffer.is_empty() {
                self.buffers.push(buffer);
                break;
            }
        }

        Ok(bytes_written)
    }

    /// Skips over `length` bytes of output.
    ///
    /// # Safety
    /// See the trait: the caller must have initialized the skipped bytes.
    unsafe fn advance(&mut self, mut length: usize) -> Result<(), Errno> {
        if length > self.available() {
            return error!(EINVAL);
        }

        while let Some(mut buffer) = self.buffers.pop() {
            if buffer.is_null() {
                continue;
            }

            let advance_by = std::cmp::min(length, buffer.length);
            buffer.advance(advance_by)?;
            self.available -= advance_by;
            self.bytes_written += advance_by;
            // Non-empty after advancing means this segment absorbed all of
            // `length`; keep the remainder and stop.
            if !buffer.is_empty() {
                self.buffers.push(buffer);
                break;
            }
            length -= advance_by;
        }

        Ok(())
    }
}
537
/// An [`InputBuffer`] that reads through a memory accessor from a user's
/// scatter/gather list of [`UserBuffer`]s.
pub struct UserBuffersInputBuffer<'a, M> {
    mm: &'a M,
    // Stored in reverse order so that `pop()` yields segments in their
    // original order (see `new_inner`).
    buffers: UserBuffers,
    available: usize,
    bytes_read: usize,
}

// Manual impl: `mm` is omitted because `M` is not required to be `Debug`.
impl<'a, M> std::fmt::Debug for UserBuffersInputBuffer<'a, M> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("UserBuffersInputBuffer")
            .field("buffers", &self.buffers)
            .field("available", &self.available)
            .field("bytes_read", &self.bytes_read)
            .finish()
    }
}
555
impl<'a, M: TaskMemoryAccessor> UserBuffersInputBuffer<'a, M> {
    /// Clamps `buffers` to the accessor's addressable range and the maximum
    /// read/write size; `available` is the resulting total readable count.
    fn new_inner(mm: &'a M, mut buffers: UserBuffers) -> Result<Self, Errno> {
        let available = UserBuffer::cap_buffers_to_max_rw_count(
            mm.maximum_valid_address().ok_or_else(|| errno!(EINVAL))?,
            &mut buffers,
        )?;
        // Reverse so `pop()` consumes segments in their original order.
        buffers.reverse();
        Ok(Self { mm, buffers, available, bytes_read: 0 })
    }

    /// Invokes `callback(segment, bytes_so_far)` for each non-null segment in
    /// original order. The callback returns how many bytes it consumed; a
    /// short segment read ends the walk, and over-reporting is EINVAL.
    fn peek_each_inner<F: FnMut(&UserBuffer, usize) -> Result<usize, Errno>>(
        &mut self,
        mut callback: F,
    ) -> Result<usize, Errno> {
        let mut read = 0;
        for buffer in self.buffers.iter().rev() {
            if buffer.is_null() {
                continue;
            }

            let result = callback(buffer, read)?;
            if result > buffer.length {
                return error!(EINVAL);
            }
            read += result;
            if result != buffer.length {
                break;
            }
        }
        Ok(read)
    }
}
589
impl<'a> UserBuffersInputBuffer<'a, CurrentTask> {
    /// Reads from the current task's own (unified) address space.
    pub fn unified_new(task: &'a CurrentTask, buffers: UserBuffers) -> Result<Self, Errno> {
        Self::new_inner(task, buffers)
    }

    /// Convenience constructor for a single `[address, address + length)` buffer.
    pub fn unified_new_at(
        task: &'a CurrentTask,
        address: UserAddress,
        length: usize,
    ) -> Result<Self, Errno> {
        Self::unified_new(task, smallvec![UserBuffer { address, length }])
    }
}

impl<'a> UserBuffersInputBuffer<'a, Task> {
    /// Reads from `task`'s memory.
    pub fn syscall_new(task: &'a Task, buffers: UserBuffers) -> Result<Self, Errno> {
        Self::new_inner(task, buffers)
    }
}

impl<'a> UserBuffersInputBuffer<'a, RemoteMemoryManager> {
    /// Reads through a remote memory manager.
    pub fn remote_new(mm: &'a RemoteMemoryManager, buffers: UserBuffers) -> Result<Self, Errno> {
        Self::new_inner(mm, buffers)
    }
}
615
616impl<'a, M: TaskMemoryAccessor> Buffer for UserBuffersInputBuffer<'a, M> {
617 fn segments_count(&self) -> Result<usize, Errno> {
618 Ok(self.buffers.iter().filter(|b| b.is_null()).count())
619 }
620
621 fn peek_each_segment(
622 &mut self,
623 callback: &mut PeekBufferSegmentsCallback<'_>,
624 ) -> Result<(), Errno> {
625 for buffer in self.buffers.iter().rev() {
629 if buffer.is_null() {
630 continue;
631 }
632 callback(buffer)
633 }
634
635 Ok(())
636 }
637}
638
impl<'a, M: TaskMemoryAccessor> InputBuffer for UserBuffersInputBuffer<'a, M> {
    /// Specialized override: copies directly into the caller's slice instead
    /// of allocating per-segment vectors like the default `peek` would.
    fn peek(&mut self, uninit_bytes: &mut [MaybeUninit<u8>]) -> Result<usize, Errno> {
        self.peek_each_inner(|buffer, read_so_far| {
            let read_to = &mut uninit_bytes[read_so_far..];
            let read_count = std::cmp::min(buffer.length, read_to.len());
            let read_to = &mut read_to[..read_count];
            let read_bytes = self.mm.read_memory(buffer.address, read_to)?;
            debug_assert_eq!(read_bytes.len(), read_count);
            Ok(read_count)
        })
    }

    fn peek_each(&mut self, callback: &mut InputBufferCallback<'_>) -> Result<usize, Errno> {
        self.peek_each_inner(|buffer, _read_so_far| {
            // Copy the whole segment out of user memory before handing it to
            // the callback.
            let bytes = self.mm.read_memory_to_vec(buffer.address, buffer.length)?;
            callback(&bytes)
        })
    }

    fn drain(&mut self) -> usize {
        let result = self.available;
        self.bytes_read += self.available;
        self.available = 0;
        self.buffers.clear();
        result
    }

    fn advance(&mut self, mut length: usize) -> Result<(), Errno> {
        if length > self.available {
            return error!(EINVAL);
        }
        self.available -= length;
        self.bytes_read += length;
        // Walk segments (stored reversed, so `pop()` yields original order),
        // dropping fully consumed ones and trimming the first partial one.
        while let Some(mut buffer) = self.buffers.pop() {
            if length < buffer.length {
                buffer.advance(length)?;
                self.buffers.push(buffer);
                return Ok(());
            }
            length -= buffer.length;
            if length == 0 {
                return Ok(());
            }
        }
        // Defensive: reaching here with `length > 0` would mean the segments
        // held less data than `available` claimed.
        if length != 0 { error!(EINVAL) } else { Ok(()) }
    }

    fn available(&self) -> usize {
        self.available
    }
    fn bytes_read(&self) -> usize {
        self.bytes_read
    }
}
693
/// An [`OutputBuffer`] backed by an in-memory `Vec<u8>`.
#[derive(Debug)]
pub struct VecOutputBuffer {
    buffer: Vec<u8>,
    // Logical write limit: `buffer.len()` may never exceed this. Tracked
    // separately from `buffer.capacity()`, which `Vec` may over-allocate.
    capacity: usize,
}
703
704impl VecOutputBuffer {
705 pub fn new(capacity: usize) -> Self {
706 Self { buffer: Vec::with_capacity(capacity), capacity }
707 }
708
709 pub fn data(&self) -> &[u8] {
710 &self.buffer
711 }
712
713 pub fn reset(&mut self) {
714 self.buffer.truncate(0)
715 }
716}
717
impl From<VecOutputBuffer> for Vec<u8> {
    /// Consumes the buffer, yielding the bytes written so far.
    fn from(data: VecOutputBuffer) -> Self {
        data.buffer
    }
}
723
impl Buffer for VecOutputBuffer {
    fn segments_count(&self) -> Result<usize, Errno> {
        // A Vec-backed buffer is always a single contiguous segment.
        Ok(1)
    }

    fn peek_each_segment(
        &mut self,
        callback: &mut PeekBufferSegmentsCallback<'_>,
    ) -> Result<(), Errno> {
        // Expose the uninitialized tail (up to the logical capacity) as a
        // single pseudo `UserBuffer`.
        // NOTE(review): the "address" here is a kernel-side pointer into the
        // Vec's spare capacity — presumably only meaningful for
        // same-address-space copies; confirm against callers.
        let current_len = self.buffer.len();
        let buffer = &mut self.buffer.spare_capacity_mut()[..self.capacity - current_len];
        callback(&UserBuffer {
            address: UserAddress::from(buffer.as_mut_ptr() as u64),
            length: buffer.len(),
        });

        Ok(())
    }
}
743
744impl OutputBuffer for VecOutputBuffer {
745 fn write_each(&mut self, callback: &mut OutputBufferCallback<'_>) -> Result<usize, Errno> {
746 let current_len = self.buffer.len();
747 let written =
748 callback(&mut self.buffer.spare_capacity_mut()[..self.capacity - current_len])?;
749 if current_len + written > self.capacity {
750 return error!(EINVAL);
751 }
752 unsafe { self.buffer.set_len(current_len + written) }
754 Ok(written)
755 }
756
757 fn available(&self) -> usize {
758 self.capacity - self.buffer.len()
759 }
760
761 fn bytes_written(&self) -> usize {
762 self.buffer.len()
763 }
764
765 fn zero(&mut self) -> Result<usize, Errno> {
766 let zeroed = self.capacity - self.buffer.len();
767 self.buffer.resize(self.capacity, 0);
768 Ok(zeroed)
769 }
770
771 unsafe fn advance(&mut self, length: usize) -> Result<(), Errno> {
772 if length > self.available() {
773 return error!(EINVAL);
774 }
775
776 self.capacity -= length;
777 let current_len = self.buffer.len();
778 unsafe { self.buffer.set_len(current_len + length) };
784 Ok(())
785 }
786}
787
/// An [`InputBuffer`] backed by an in-memory `Vec<u8>`.
#[derive(Debug)]
pub struct VecInputBuffer {
    buffer: Vec<u8>,

    // Read position: `buffer[..bytes_read]` has already been consumed.
    bytes_read: usize,
}

impl VecInputBuffer {
    /// Creates an input buffer over a copy of `buffer`.
    pub fn new(buffer: &[u8]) -> Self {
        Self { buffer: buffer.to_vec(), bytes_read: 0 }
    }
}

impl From<Vec<u8>> for VecInputBuffer {
    fn from(buffer: Vec<u8>) -> Self {
        Self { buffer, bytes_read: 0 }
    }
}
808
impl Buffer for VecInputBuffer {
    fn segments_count(&self) -> Result<usize, Errno> {
        // A Vec-backed buffer is always a single contiguous segment.
        Ok(1)
    }

    fn peek_each_segment(
        &mut self,
        callback: &mut PeekBufferSegmentsCallback<'_>,
    ) -> Result<(), Errno> {
        // Expose the unread tail as one pseudo `UserBuffer`.
        // NOTE(review): the "address" is a kernel-side pointer into the Vec —
        // presumably only meaningful for same-address-space copies; confirm
        // against callers.
        let buffer = &self.buffer[self.bytes_read..];
        callback(&UserBuffer {
            address: UserAddress::from(buffer.as_ptr() as u64),
            length: buffer.len(),
        });

        Ok(())
    }
}
827
828impl InputBuffer for VecInputBuffer {
829 fn peek_each(&mut self, callback: &mut InputBufferCallback<'_>) -> Result<usize, Errno> {
830 let read = callback(&self.buffer[self.bytes_read..])?;
831 if self.bytes_read + read > self.buffer.len() {
832 return error!(EINVAL);
833 }
834 debug_assert!(self.bytes_read <= self.buffer.len());
835 Ok(read)
836 }
837 fn advance(&mut self, length: usize) -> Result<(), Errno> {
838 if length > self.buffer.len() {
839 return error!(EINVAL);
840 }
841 self.bytes_read += length;
842 debug_assert!(self.bytes_read <= self.buffer.len());
843 Ok(())
844 }
845 fn available(&self) -> usize {
846 self.buffer.len() - self.bytes_read
847 }
848 fn bytes_read(&self) -> usize {
849 self.bytes_read
850 }
851 fn drain(&mut self) -> usize {
852 let result = self.available();
853 self.bytes_read += result;
854 result
855 }
856}
857
impl VecInputBuffer {
    /// Reads a `T` from the unread portion of the buffer, advancing past it.
    ///
    /// Fails with EINVAL if fewer than `size_of::<T>()` bytes remain or the
    /// bytes do not form a valid `T`; the read position is unchanged on error.
    pub fn read_object<T: FromBytes>(&mut self) -> Result<T, Errno> {
        let size = std::mem::size_of::<T>();
        let end = self.bytes_read + size;
        if end > self.buffer.len() {
            return error!(EINVAL);
        }
        let obj =
            T::read_from_bytes(&self.buffer[self.bytes_read..end]).map_err(|_| errno!(EINVAL))?;
        // Only advance once the object has been successfully decoded.
        self.bytes_read = end;
        debug_assert!(self.bytes_read <= self.buffer.len());
        Ok(obj)
    }
}
875
#[cfg(test)]
mod tests {
    use super::*;
    use crate::mm::{MemoryAccessor as _, PAGE_SIZE};
    use crate::testing::*;
    use usercopy::slice_to_maybe_uninit_mut;

    // Exercises UserBuffersInputBuffer against real mapped task memory:
    // over-reporting callbacks, drain, read_all, and piecewise read_exact.
    #[test]
    fn test_data_input_buffer() {
        let mut executor = fuchsia_async::TestExecutor::new();
        executor.run_singlethreaded(async {
            spawn_kernel_and_run(async |locked, current_task| {
                let page_size = *PAGE_SIZE;
                let addr =
                    map_memory(locked, &current_task, UserAddress::default(), 64 * page_size);

                let data: Vec<u8> = (0..1024).map(|i| (i % 256) as u8).collect();
                let mm = current_task.deref();
                mm.write_memory(addr, &data).expect("failed to write test data");

                // Two segments: 25 bytes at `addr` and 12 bytes at `addr + 64`.
                let input_iovec = smallvec![
                    UserBuffer { address: addr, length: 25 },
                    UserBuffer {
                        address: (addr + 64usize).expect("Memory mapped OOB!"),
                        length: 12
                    },
                ];

                // A callback that claims to consume more than it was given must fail.
                {
                    let mut input_buffer =
                        UserBuffersInputBuffer::unified_new(mm, input_iovec.clone())
                            .expect("UserBuffersInputBuffer");
                    assert!(input_buffer.peek_each(&mut |data| Ok(data.len() + 1)).is_err());
                }

                // drain() consumes everything in one step.
                {
                    let mut input_buffer =
                        UserBuffersInputBuffer::unified_new(mm, input_iovec.clone())
                            .expect("UserBuffersInputBuffer");
                    assert_eq!(input_buffer.available(), 37);
                    assert_eq!(input_buffer.bytes_read(), 0);
                    assert_eq!(input_buffer.drain(), 37);
                    assert_eq!(input_buffer.available(), 0);
                    assert_eq!(input_buffer.bytes_read(), 37);
                }

                // read_all() returns the concatenation of both segments.
                {
                    let mut input_buffer =
                        UserBuffersInputBuffer::unified_new(mm, input_iovec.clone())
                            .expect("UserBuffersInputBuffer");
                    assert_eq!(input_buffer.available(), 37);
                    assert_eq!(input_buffer.bytes_read(), 0);
                    let buffer = input_buffer.read_all().expect("read_all");
                    assert_eq!(input_buffer.available(), 0);
                    assert_eq!(input_buffer.bytes_read(), 37);
                    assert_eq!(buffer.len(), 37);
                    assert_eq!(&data[..25], &buffer[..25]);
                    assert_eq!(&data[64..76], &buffer[25..37]);
                }

                // Piecewise read_exact, including a final over-long read that
                // must fail once the buffer is exhausted.
                {
                    let mut input_buffer = UserBuffersInputBuffer::unified_new(mm, input_iovec)
                        .expect("UserBuffersInputBuffer");
                    let mut buffer = [0; 50];
                    assert_eq!(input_buffer.available(), 37);
                    assert_eq!(input_buffer.bytes_read(), 0);
                    assert_eq!(
                        input_buffer
                            .read_exact(slice_to_maybe_uninit_mut(&mut buffer[0..20]))
                            .expect("read"),
                        20
                    );
                    assert_eq!(input_buffer.available(), 17);
                    assert_eq!(input_buffer.bytes_read(), 20);
                    assert_eq!(
                        input_buffer
                            .read_exact(slice_to_maybe_uninit_mut(&mut buffer[20..37]))
                            .expect("read"),
                        17
                    );
                    assert!(
                        input_buffer
                            .read_exact(slice_to_maybe_uninit_mut(&mut buffer[37..]))
                            .is_err()
                    );
                    assert_eq!(input_buffer.available(), 0);
                    assert_eq!(input_buffer.bytes_read(), 37);
                    assert_eq!(&data[..25], &buffer[..25]);
                    assert_eq!(&data[64..76], &buffer[25..37]);
                }
            })
            .await;
        });
    }

    // Exercises UserBuffersOutputBuffer: over-reporting callbacks and
    // scatter-writing across two segments with overflow detection.
    #[test]
    fn test_data_output_buffer() {
        let mut executor = fuchsia_async::TestExecutor::new();
        executor.run_singlethreaded(async {
            spawn_kernel_and_run(async |locked, current_task| {
                let page_size = *PAGE_SIZE;
                let addr =
                    map_memory(locked, &current_task, UserAddress::default(), 64 * page_size);

                let output_iovec = smallvec![
                    UserBuffer { address: addr, length: 25 },
                    UserBuffer {
                        address: (addr + 64usize).expect("Memory was mapped OOB!"),
                        length: 12
                    },
                ];

                let mm = current_task.deref();
                let data: Vec<u8> = (0..1024).map(|i| (i % 256) as u8).collect();

                // A callback that claims to write more than it was given must fail.
                {
                    let mut output_buffer =
                        UserBuffersOutputBuffer::unified_new(mm, output_iovec.clone())
                            .expect("UserBuffersOutputBuffer");
                    assert!(output_buffer.write_each(&mut |data| Ok(data.len() + 1)).is_err());
                }

                // Piecewise write_all, then verify both segments in user memory.
                {
                    let mut output_buffer = UserBuffersOutputBuffer::unified_new(mm, output_iovec)
                        .expect("UserBuffersOutputBuffer");
                    assert_eq!(output_buffer.available(), 37);
                    assert_eq!(output_buffer.bytes_written(), 0);
                    assert_eq!(output_buffer.write_all(&data[0..20]).expect("write"), 20);
                    assert_eq!(output_buffer.available(), 17);
                    assert_eq!(output_buffer.bytes_written(), 20);
                    assert_eq!(output_buffer.write_all(&data[20..37]).expect("write"), 17);
                    assert_eq!(output_buffer.available(), 0);
                    assert_eq!(output_buffer.bytes_written(), 37);
                    assert!(output_buffer.write_all(&data[37..50]).is_err());

                    let buffer = current_task
                        .read_memory_to_array::<128>(addr)
                        .expect("failed to write test data");
                    assert_eq!(&data[0..25], &buffer[0..25]);
                    assert_eq!(&data[25..37], &buffer[64..76]);
                }
            })
            .await;
        });
    }

    // Exercises VecInputBuffer: over-reporting callbacks, drain, read_all,
    // read_exact, and typed read_object with exhaustion errors.
    #[::fuchsia::test]
    fn test_vec_input_buffer() {
        let mut input_buffer = VecInputBuffer::new(b"helloworld");
        assert!(input_buffer.peek_each(&mut |data| Ok(data.len() + 1)).is_err());

        let mut input_buffer = VecInputBuffer::new(b"helloworld");
        assert_eq!(input_buffer.bytes_read(), 0);
        assert_eq!(input_buffer.available(), 10);
        assert_eq!(input_buffer.drain(), 10);
        assert_eq!(input_buffer.bytes_read(), 10);
        assert_eq!(input_buffer.available(), 0);

        let mut input_buffer = VecInputBuffer::new(b"helloworld");
        assert_eq!(input_buffer.bytes_read(), 0);
        assert_eq!(input_buffer.available(), 10);
        assert_eq!(&input_buffer.read_all().expect("read_all"), b"helloworld");
        assert_eq!(input_buffer.bytes_read(), 10);
        assert_eq!(input_buffer.available(), 0);

        let mut input_buffer = VecInputBuffer::new(b"helloworld");
        let mut buffer = [0; 5];
        assert_eq!(
            input_buffer.read_exact(slice_to_maybe_uninit_mut(&mut buffer)).expect("read"),
            5
        );
        assert_eq!(input_buffer.bytes_read(), 5);
        assert_eq!(input_buffer.available(), 5);
        assert_eq!(&buffer, b"hello");
        assert_eq!(
            input_buffer.read_exact(slice_to_maybe_uninit_mut(&mut buffer)).expect("read"),
            5
        );
        assert_eq!(input_buffer.bytes_read(), 10);
        assert_eq!(input_buffer.available(), 0);
        assert_eq!(&buffer, b"world");
        assert!(input_buffer.read_exact(slice_to_maybe_uninit_mut(&mut buffer)).is_err());

        // Typed reads advance by size_of::<T>() and fail cleanly when exhausted.
        let mut input_buffer = VecInputBuffer::new(b"hello");
        assert_eq!(input_buffer.bytes_read(), 0);
        let buffer: [u8; 3] = input_buffer.read_object().expect("read_object");
        assert_eq!(&buffer, b"hel");
        assert_eq!(input_buffer.bytes_read(), 3);
        let buffer: [u8; 2] = input_buffer.read_object().expect("read_object");
        assert_eq!(&buffer, b"lo");
        assert_eq!(input_buffer.bytes_read(), 5);
        assert!(input_buffer.read_object::<[u8; 1]>().is_err());
        assert_eq!(input_buffer.bytes_read(), 5);

        // A failed read_object must not move the read position.
        let mut input_buffer = VecInputBuffer::new(b"hello");
        assert_eq!(input_buffer.bytes_read(), 0);
        assert!(input_buffer.read_object::<[u8; 100]>().is_err());
        assert_eq!(input_buffer.bytes_read(), 0);
    }

    // Exercises VecOutputBuffer: over-reporting callbacks, capacity-bounded
    // write_all, and conversion back into Vec<u8>.
    #[::fuchsia::test]
    fn test_vec_output_buffer() {
        let mut output_buffer = VecOutputBuffer::new(10);
        assert!(output_buffer.write_each(&mut |data| Ok(data.len() + 1)).is_err());
        assert_eq!(output_buffer.bytes_written(), 0);
        assert_eq!(output_buffer.available(), 10);
        assert_eq!(output_buffer.write_all(b"hello").expect("write"), 5);
        assert_eq!(output_buffer.bytes_written(), 5);
        assert_eq!(output_buffer.available(), 5);
        assert_eq!(output_buffer.data(), b"hello");
        assert_eq!(output_buffer.write_all(b"world").expect("write"), 5);
        assert_eq!(output_buffer.bytes_written(), 10);
        assert_eq!(output_buffer.available(), 0);
        assert_eq!(output_buffer.data(), b"helloworld");
        assert!(output_buffer.write_all(b"foo").is_err());
        let data: Vec<u8> = output_buffer.into();
        assert_eq!(data, b"helloworld".to_vec());
    }

    // Exercises OutputBuffer::write_buffer transferring between Vec buffers.
    #[::fuchsia::test]
    fn test_vec_write_buffer() {
        let mut input_buffer = VecInputBuffer::new(b"helloworld");
        let mut output_buffer = VecOutputBuffer::new(20);
        assert_eq!(output_buffer.write_buffer(&mut input_buffer).expect("write_buffer"), 10);
        assert_eq!(output_buffer.data(), b"helloworld");
    }
}
1109}