// rkyv — ser/allocator/mod.rs
1//! Allocators for serializers to use during serialization.
2
3#[cfg(feature = "alloc")]
4mod alloc;
5mod core;
6
7use ::core::{alloc::Layout, ptr::NonNull};
8use rancor::{Fallible, Strategy};
9
10#[cfg(feature = "alloc")]
11pub use self::alloc::*;
12pub use self::core::*;
13
/// A serializer that can allocate scratch space.
///
/// Scratch space is temporary memory used during serialization. Allocations
/// are pushed and popped, suggesting stack (LIFO) discipline — see the
/// ordering requirement on [`Allocator::pop_alloc`].
///
/// # Safety
///
/// `push_alloc` must return a pointer to unaliased memory which fits the
/// provided layout.
pub unsafe trait Allocator<E = <Self as Fallible>::Error> {
    /// Allocates scratch space of the requested size.
    ///
    /// On success, returns a pointer to a block of memory fitting `layout`.
    ///
    /// # Safety
    ///
    /// `layout` must have non-zero size.
    unsafe fn push_alloc(&mut self, layout: Layout)
        -> Result<NonNull<[u8]>, E>;

    /// Deallocates previously allocated scratch space.
    ///
    /// # Safety
    ///
    /// - The allocations pushed on top of the given allocation must not be
    ///   popped after calling `pop_alloc`.
    /// - `layout` must be the same layout that was used to allocate the block
    ///   of memory for the given pointer.
    unsafe fn pop_alloc(
        &mut self,
        ptr: NonNull<u8>,
        layout: Layout,
    ) -> Result<(), E>;
}
43
// Forwards both allocator operations through `Strategy` so that a strategy
// wrapping an allocator is itself usable as an allocator. NOTE(review): the
// `T::push_alloc(self, ...)` calls rely on `&mut Strategy<T, E>` coercing to
// `&mut T` — presumably via a `DerefMut` impl in rancor; confirm there.
unsafe impl<T: Allocator<E>, E> Allocator<E> for Strategy<T, E> {
    unsafe fn push_alloc(
        &mut self,
        layout: Layout,
    ) -> Result<NonNull<[u8]>, E> {
        // SAFETY: The safety requirements for `push_alloc()` are the same as
        // the requirements for `T::push_alloc`.
        unsafe { T::push_alloc(self, layout) }
    }

    unsafe fn pop_alloc(
        &mut self,
        ptr: NonNull<u8>,
        layout: Layout,
    ) -> Result<(), E> {
        // SAFETY: The safety requirements for `pop_alloc()` are the same as
        // the requirements for `T::pop_alloc`.
        unsafe { T::pop_alloc(self, ptr, layout) }
    }
}
64
/// Statistics for the allocations which occurred during serialization.
#[derive(Debug)]
pub struct AllocationStats {
    // Bytes currently allocated; incremented by `push`, decremented by `pop`.
    bytes_allocated: usize,
    // Allocations currently live; incremented by `push`, decremented by `pop`.
    allocations: usize,
    /// The maximum number of bytes that were concurrently allocated.
    pub max_bytes_allocated: usize,
    /// The maximum number of concurrent allocations.
    pub max_allocations: usize,
    /// The maximum alignment of requested allocations.
    pub max_alignment: usize,
}
77
78impl AllocationStats {
79    /// Returns the minimum arena capacity required to serialize the same data.
80    ///
81    /// This calculation takes into account packing efficiency for slab
82    /// allocated space. It is not exact, and has an error bound of
83    /// `max_allocations * (max_alignment - 1)` bytes. This should be suitably
84    /// small for most use cases.
85    #[inline]
86    pub fn min_arena_capacity(&self) -> usize {
87        self.max_bytes_allocated + self.min_arena_capacity_max_error()
88    }
89
90    /// Returns the maximum error term for the minimum arena capacity
91    /// calculation.
92    #[inline]
93    pub fn min_arena_capacity_max_error(&self) -> usize {
94        self.max_allocations * (self.max_alignment - 1)
95    }
96}
97
98impl AllocationStats {
99    #[inline]
100    fn push(&mut self, layout: Layout) {
101        self.bytes_allocated += layout.size();
102        self.allocations += 1;
103        self.max_bytes_allocated =
104            usize::max(self.bytes_allocated, self.max_bytes_allocated);
105        self.max_allocations =
106            usize::max(self.allocations, self.max_allocations);
107        self.max_alignment = usize::max(self.max_alignment, layout.align());
108    }
109
110    #[inline]
111    fn pop(&mut self, layout: Layout) {
112        self.bytes_allocated -= layout.size();
113        self.allocations -= 1;
114    }
115}
116
/// A passthrough allocator that tracks usage.
pub struct AllocationTracker<T> {
    // The wrapped allocator that actually services requests.
    inner: T,
    // Running statistics updated on every push/pop.
    stats: AllocationStats,
}
122
123impl<T> AllocationTracker<T> {
124    /// Returns a new allocation tracker wrapping the given allocator.
125    pub fn new(inner: T) -> Self {
126        Self {
127            inner,
128            stats: AllocationStats {
129                bytes_allocated: 0,
130                allocations: 0,
131                max_bytes_allocated: 0,
132                max_allocations: 0,
133                max_alignment: 1,
134            },
135        }
136    }
137
138    /// Returns the allocation stats accumulated during serialization.
139    pub fn into_stats(self) -> AllocationStats {
140        self.stats
141    }
142}
143
144unsafe impl<T: Allocator<E>, E> Allocator<E> for AllocationTracker<T> {
145    unsafe fn push_alloc(
146        &mut self,
147        layout: Layout,
148    ) -> Result<NonNull<[u8]>, E> {
149        self.stats.push(layout);
150        // SAFETY: The safety requirements for `push_alloc` are the same as the
151        // requirements for `inner.push_alloc`.
152        unsafe { self.inner.push_alloc(layout) }
153    }
154
155    unsafe fn pop_alloc(
156        &mut self,
157        ptr: NonNull<u8>,
158        layout: Layout,
159    ) -> Result<(), E> {
160        self.stats.pop(layout);
161        // SAFETY: The safety requirements for `pop_alloc` are the same as the
162        // requirements for `inner.pop_alloc`.
163        unsafe { self.inner.pop_alloc(ptr, layout) }
164    }
165}
166
167impl<T> From<T> for AllocationTracker<T> {
168    fn from(inner: T) -> Self {
169        Self::new(inner)
170    }
171}
172
#[cfg(test)]
mod tests {
    use core::mem::MaybeUninit;

    use rancor::{Panic, Strategy};

    use crate::{
        api::serialize_using,
        ser::{
            allocator::{AllocationStats, AllocationTracker, SubAllocator},
            sharing::Unshare,
            writer::Buffer,
            Serializer,
        },
        util::Align,
        Serialize,
    };

    /// A serializer whose scratch allocator is wrapped in an
    /// `AllocationTracker`, so tests can inspect allocation statistics.
    type TrackerSerializer<'a> = Strategy<
        Serializer<Buffer<'a>, AllocationTracker<SubAllocator<'a>>, Unshare>,
        Panic,
    >;

    /// Serializes `value` into a fixed 256-byte on-stack buffer with a
    /// 256-byte scratch arena, and returns the accumulated allocation stats.
    fn track_serialize<T>(value: &T) -> AllocationStats
    where
        T: for<'a> Serialize<TrackerSerializer<'a>>,
    {
        let mut output = Align([MaybeUninit::<u8>::uninit(); 256]);
        let mut scratch = [MaybeUninit::<u8>::uninit(); 256];

        let mut serializer = Serializer::new(
            Buffer::from(&mut *output),
            AllocationTracker::new(SubAllocator::new(&mut scratch)),
            Unshare,
        );
        serialize_using(value, &mut serializer).unwrap();
        // The tracker is the second element of the serializer's raw parts.
        serializer.into_raw_parts().1.into_stats()
    }

    // An integer serializes without any scratch allocations, so all stats
    // stay at their initial values.
    #[test]
    fn simple() {
        let stats = track_serialize(&42);
        assert_eq!(stats.max_bytes_allocated, 0);
        assert_eq!(stats.max_allocations, 0);
        assert_eq!(stats.max_alignment, 1);
        assert_eq!(stats.min_arena_capacity(), 0);
        assert_eq!(stats.min_arena_capacity_max_error(), 0);
    }

    // A flat `Vec` also requires no scratch allocations.
    #[cfg(feature = "alloc")]
    #[test]
    fn nested() {
        use crate::alloc::vec;

        let stats = track_serialize(&vec![1, 2, 3, 4]);
        assert_eq!(stats.max_bytes_allocated, 0);
        assert_eq!(stats.max_allocations, 0);
        assert_eq!(stats.max_alignment, 1);
        assert_eq!(stats.min_arena_capacity(), 0);
        assert_eq!(stats.min_arena_capacity_max_error(), 0);
    }

    // A `Vec` of `Vec`s does use scratch space (presumably to buffer state
    // for the inner vectors — confirm against the Vec serialize impl), with
    // at most one allocation live at a time.
    #[cfg(feature = "alloc")]
    #[test]
    fn doubly_nested() {
        use crate::alloc::vec;

        let stats = track_serialize(&vec![vec![1, 2], vec![3, 4]]);
        assert_ne!(stats.max_bytes_allocated, 0);
        assert_eq!(stats.max_allocations, 1);
        assert_ne!(stats.min_arena_capacity(), 0);
    }
}
245}