1use crate::writer::Error;
10use inspect_format::{
11 Block, BlockAccessorExt, BlockAccessorMutExt, BlockIndex, BlockType, Free, ReadBytes, Reserved,
12 WriteBytes, constants, utils,
13};
14use std::cmp::min;
15
/// A buddy-allocation heap laid out over a byte container `T` (typically a VMO
/// mapping). Blocks are tracked per power-of-two "order" via intrusive free lists.
#[derive(Debug)]
pub struct Heap<T> {
    /// The underlying byte storage all blocks are read from / written to.
    pub(crate) container: T,
    /// Number of bytes of `container` currently managed by the heap; grows in
    /// page-sized steps up to `container.len()`.
    current_size_bytes: usize,
    /// Head block index of the free list for each block order
    /// (`BlockIndex::EMPTY` means the list is empty).
    free_head_per_order: [BlockIndex; constants::NUM_ORDERS as usize],
    /// Total number of successful `allocate_block` calls (monotonic counter).
    allocated_blocks: usize,
    /// Total number of successful `free_block` calls (monotonic counter).
    deallocated_blocks: usize,
    /// Number of allocations that failed because the heap could not grow.
    failed_allocations: usize,
    /// Whether a header block exists at `BlockIndex::HEADER`; when true,
    /// `grow_heap` keeps the header's recorded VMO size up to date.
    has_header: bool,
}
27
impl<T: ReadBytes + WriteBytes> Heap<T> {
    /// Creates a heap over `container` and writes an Inspect header block at
    /// the start of it.
    pub fn new(container: T) -> Result<Self, Error> {
        let mut heap = Self::empty(container)?;
        heap.init_header()?;
        Ok(heap)
    }

    /// Creates a heap over `container` without a header block, initially
    /// managing one page of the container.
    pub fn empty(container: T) -> Result<Self, Error> {
        let mut heap = Heap {
            container,
            current_size_bytes: 0,
            free_head_per_order: [BlockIndex::EMPTY; constants::NUM_ORDERS as usize],
            allocated_blocks: 0,
            deallocated_blocks: 0,
            failed_allocations: 0,
            has_header: false,
        };
        // Make the first page available; fails if the container is smaller
        // than one page.
        heap.grow_heap(constants::PAGE_SIZE_BYTES)?;
        Ok(heap)
    }

    /// Allocates a header-order block (expected to land at index 0 on a fresh
    /// heap) and converts it into the header, recording the current heap size.
    #[inline]
    fn init_header(&mut self) -> Result<(), Error> {
        let header_index =
            self.allocate_block(inspect_format::utils::order_to_size(constants::HEADER_ORDER))?;
        let heap_current_size = self.current_size_bytes;
        self.container
            .block_at_unchecked_mut::<Reserved>(header_index)
            .become_header(heap_current_size)?;
        self.has_header = true;
        Ok(())
    }

    /// Number of bytes currently managed by the heap.
    pub fn current_size(&self) -> usize {
        self.current_size_bytes
    }

    /// Upper bound the heap can grow to: the container's length.
    pub fn maximum_size(&self) -> usize {
        self.container.len()
    }

    /// Lifetime count of successful allocations.
    pub fn total_allocated_blocks(&self) -> usize {
        self.allocated_blocks
    }

    /// Lifetime count of successful deallocations.
    pub fn total_deallocated_blocks(&self) -> usize {
        self.deallocated_blocks
    }

    /// Lifetime count of allocations that failed because the heap was full.
    pub fn failed_allocations(&self) -> usize {
        self.failed_allocations
    }

    /// Allocates a block of at least `min_size` bytes and returns its index.
    ///
    /// Finds the smallest order with a free block, growing the heap by one
    /// page when none exists, then splits larger blocks down to the requested
    /// order. Errors if `min_size` exceeds the maximum order or the heap
    /// cannot grow.
    pub fn allocate_block(&mut self, min_size: usize) -> Result<BlockIndex, Error> {
        let min_fit_order = utils::fit_order(min_size);
        if min_fit_order >= constants::NUM_ORDERS as usize {
            return Err(Error::InvalidBlockOrder(min_fit_order));
        }
        let min_fit_order = min_fit_order as u8;
        // Smallest order >= min_fit_order whose free list head is a valid
        // free block of that order.
        let order_found = (min_fit_order..constants::NUM_ORDERS)
            .find(|&i| self.is_free_block(self.free_head_per_order[i as usize], i).is_some());
        let next_order = match order_found {
            Some(order) => order,
            None => {
                // No free block anywhere: grow by a page, which pushes new
                // max-order free blocks onto the top free list.
                self.grow_heap(self.current_size_bytes + constants::PAGE_SIZE_BYTES)?;
                constants::NUM_ORDERS - 1
            }
        };
        let block_index = self.free_head_per_order[next_order as usize];
        // Repeatedly halve the block until it is exactly min_fit_order;
        // split_block re-links block_index as head of the smaller order's
        // free list, so the loop keeps operating on the same index.
        while self.container.block_at(block_index).order() > min_fit_order {
            self.split_block(block_index)?;
        }
        self.remove_free(block_index);
        let _ = self.container.block_at_unchecked_mut::<Free>(block_index).become_reserved();
        self.allocated_blocks += 1;
        Ok(block_index)
    }

    /// Frees the block at `block_index`, merging it with its buddy repeatedly
    /// while the buddy is also free and of the same order, then pushes the
    /// (possibly merged) block onto the free list for its final order.
    ///
    /// Returns `Error::BlockAlreadyFree` if the block is already free.
    pub fn free_block(&mut self, mut block_index: BlockIndex) -> Result<(), Error> {
        let block = self.container.block_at(block_index);
        if block.block_type() == Some(BlockType::Free) {
            return Err(Error::BlockAlreadyFree(block_index));
        }
        let mut buddy_index = buddy(block_index, block.order());

        while self.possible_to_merge(buddy_index, block_index) {
            // Unlink the buddy from its free list before absorbing it.
            self.remove_free(buddy_index);
            // The merged block lives at the lower of the two indices.
            if buddy_index < block_index {
                std::mem::swap(&mut buddy_index, &mut block_index);
            }
            let mut block = self.container.block_at_mut(block_index);
            let order = block.order();
            block.set_order(order + 1)?;
            // Candidate buddy at the next order up for another merge round.
            buddy_index = buddy(block_index, order + 1);
        }
        // Push onto the head of the free list for the final order.
        let block = self.container.block_at_unchecked_mut::<Reserved>(block_index);
        let order = block.order();
        let _ = block.become_free(self.free_head_per_order[order as usize]);
        self.free_head_per_order[order as usize] = block_index;
        self.deallocated_blocks += 1;
        Ok(())
    }

    /// True when the block at `buddy_index` is free, has the same order as
    /// the block at `block_index`, and that order is below the maximum
    /// (max-order blocks are never merged).
    #[inline]
    fn possible_to_merge(&self, buddy_index: BlockIndex, block_index: BlockIndex) -> bool {
        self.container
            .maybe_block_at::<Free>(buddy_index)
            .map(|buddy_block| {
                let block = self.container.block_at(block_index);
                block.order() < constants::NUM_ORDERS - 1 && block.order() == buddy_block.order()
            })
            .unwrap_or(false)
    }

    /// Copies out the currently-managed portion of the container as bytes.
    pub(crate) fn bytes(&self) -> Vec<u8> {
        self.container.get_slice(self.current_size_bytes).unwrap().to_vec()
    }

    /// Grows the managed region to `requested_size` bytes (page-aligned down),
    /// carving the newly-covered space into max-order free blocks prepended to
    /// the top free list. Fails, counting a failed allocation, if the request
    /// exceeds the container length or `MAX_VMO_SIZE`.
    #[inline]
    fn grow_heap(&mut self, requested_size: usize) -> Result<(), Error> {
        let container_size = self.container.len();
        if requested_size > container_size || requested_size > constants::MAX_VMO_SIZE {
            self.failed_allocations += 1;
            return Err(Error::HeapMaxSizeReached);
        }
        // Defensive: after the check above, requested_size <= container_size,
        // so this min() is equivalent to requested_size.
        let new_size = min(container_size, requested_size);
        let min_index = BlockIndex::from_offset(self.current_size_bytes);
        let mut last_index = self.free_head_per_order[(constants::NUM_ORDERS - 1) as usize];
        // Start just past the (page-aligned) new end and walk backwards one
        // max-order block at a time, chaining each new free block to the
        // previous head, so the lowest new block ends up first in the list.
        let mut curr_index =
            BlockIndex::from_offset(new_size - new_size % constants::PAGE_SIZE_BYTES);
        loop {
            curr_index -= BlockIndex::from_offset(constants::MAX_ORDER_SIZE);
            Block::free(&mut self.container, curr_index, constants::NUM_ORDERS - 1, last_index)
                .expect("Failed to create free block");
            last_index = curr_index;
            if curr_index <= min_index {
                break;
            }
        }
        self.free_head_per_order[(constants::NUM_ORDERS - 1) as usize] = last_index;
        self.current_size_bytes = new_size;
        // Keep the header's recorded VMO size in sync with the heap size.
        if self.has_header {
            self.container
                .block_at_unchecked_mut(BlockIndex::HEADER)
                .set_vmo_size(self.current_size_bytes as u32)?;
        }
        Ok(())
    }

    /// Returns the block at `index` when it lies inside the managed region,
    /// is free, and has exactly `expected_order`; otherwise `None`.
    #[inline]
    fn is_free_block(
        &mut self,
        index: BlockIndex,
        expected_order: u8,
    ) -> Option<Block<&mut T, Free>> {
        // Out-of-range indices (e.g. EMPTY on a fresh heap, or beyond the
        // grown region) are never valid free blocks.
        if (*index as usize) >= self.current_size_bytes / constants::MIN_ORDER_SIZE {
            return None;
        }
        self.container
            .maybe_block_at_mut::<Free>(index)
            .filter(|block| block.order() == expected_order)
    }

    /// Unlinks the free block at `block_index` from the free list of its
    /// order, whether it is the list head or an interior node. A no-op if the
    /// order is out of range or the block is not found in the list.
    #[inline]
    fn remove_free(&mut self, block_index: BlockIndex) {
        let block = self.container.block_at_unchecked::<Free>(block_index);
        let free_next_index = block.free_next_index();
        let order = block.order();
        // Defensive: an invalid order would index out of bounds below.
        if order >= constants::NUM_ORDERS {
            return;
        }
        let mut next_index = self.free_head_per_order[order as usize];
        if next_index == block_index {
            // Head of the list: point the head at the block's successor.
            self.free_head_per_order[order as usize] = free_next_index;
            return;
        }
        // Walk the list until the predecessor of block_index is found, then
        // splice the block out.
        while let Some(mut curr_block) = self.is_free_block(next_index, order) {
            next_index = curr_block.free_next_index();
            if next_index == block_index {
                curr_block.set_free_next_index(free_next_index);
                return;
            }
        }
    }

    /// Splits the free block at `block_index` into two buddies of one order
    /// lower, leaving `block_index` at the head of the lower order's free
    /// list with its new buddy linked right after it.
    ///
    /// NOTE(review): `block_order - 1` underflows for order 0; callers
    /// (allocate_block's split loop) only invoke this when order > 0.
    #[inline]
    fn split_block(&mut self, block_index: BlockIndex) -> Result<(), Error> {
        let block_order = self.container.block_at(block_index).order();
        if block_order >= constants::NUM_ORDERS {
            return Err(Error::InvalidBlockOrderAtIndex(block_order, block_index));
        }
        // Take the block off its current (higher-order) free list first.
        self.remove_free(block_index);
        let buddy_index = buddy(block_index, block_order - 1);
        let mut block = self.container.block_at_mut(block_index);
        block.set_order(block_order - 1)?;
        // Link: block -> buddy.
        block.become_free(buddy_index);

        let mut buddy = self.container.block_at_mut(buddy_index);
        let buddy_order = block_order - 1;
        buddy.set_order(buddy_order)?;
        // Link: buddy -> previous head; then block becomes the new head.
        buddy.become_free(self.free_head_per_order[buddy_order as usize]);
        self.free_head_per_order[buddy_order as usize] = block_index;
        Ok(())
    }
}
246
247fn buddy(index: BlockIndex, order: u8) -> BlockIndex {
248 index ^ BlockIndex::from_offset(utils::order_to_size(order))
249}
250
#[cfg(test)]
mod tests {
    //! Tests exercising the buddy allocator: allocation/splitting, freeing/
    //! merging, heap growth, failure counting, and header size maintenance.
    //! `validate` compares the full block layout of the heap against an
    //! expected list of (index, order, type) triples.

    use super::*;
    use crate::reader::snapshot::{BackingBuffer, BlockIterator};
    use inspect_format::{BlockType, Container, Header, block_testing};

    /// Expected shape of a single block for layout validation.
    #[derive(Debug)]
    struct BlockDebug {
        index: BlockIndex,
        order: u8,
        block_type: BlockType,
    }

    /// Asserts that iterating the heap's bytes yields exactly the blocks in
    /// `expected`, in order, matching on type, index, and order.
    fn validate<T: WriteBytes + ReadBytes>(expected: &[BlockDebug], heap: &Heap<T>) {
        let buffer = BackingBuffer::Bytes(heap.bytes());
        let actual: Vec<BlockDebug> = BlockIterator::from(&buffer)
            .map(|block| BlockDebug {
                order: block.order(),
                index: block.index(),
                block_type: block.block_type().unwrap(),
            })
            .collect();
        assert_eq!(expected.len(), actual.len());
        for (i, result) in actual.iter().enumerate() {
            assert_eq!(result.block_type, expected[i].block_type);
            assert_eq!(result.index, expected[i].index);
            assert_eq!(result.order, expected[i].order);
        }
    }

    // A fresh empty heap covers one page as two max-order free blocks.
    #[fuchsia::test]
    fn empty_heap() {
        let (container, _storage) = Container::read_and_write(4096).unwrap();
        let heap = Heap::empty(container).unwrap();
        assert_eq!(heap.current_size_bytes, 4096);
        assert_eq!(heap.free_head_per_order, [BlockIndex::EMPTY; 8]);

        let expected = [
            BlockDebug { index: 0.into(), order: 7, block_type: BlockType::Free },
            BlockDebug { index: 128.into(), order: 7, block_type: BlockType::Free },
        ];
        validate(&expected, &heap);
        assert_eq!(*heap.free_head_per_order[7], 0);
        assert_eq!(*heap.container.block_at_unchecked::<Free>(0.into()).free_next_index(), 128);
        assert_eq!(*heap.container.block_at_unchecked::<Free>(128.into()).free_next_index(), 0);
        assert_eq!(heap.failed_allocations, 0);
    }

    // Heap::new splits the first max-order block all the way down to place a
    // header block at index 0, leaving one free block per order.
    #[fuchsia::test]
    fn new_heap() {
        let (container, _storage) = Container::read_and_write(4096).unwrap();
        let heap = Heap::new(container).unwrap();
        assert_eq!(heap.current_size_bytes, 4096);
        assert_eq!(
            heap.free_head_per_order,
            [
                BlockIndex::from(0),
                BlockIndex::from(2),
                BlockIndex::from(4),
                BlockIndex::from(8),
                BlockIndex::from(16),
                BlockIndex::from(32),
                BlockIndex::from(64),
                BlockIndex::from(128)
            ]
        );

        let expected = [
            BlockDebug { index: 0.into(), order: 1, block_type: BlockType::Header },
            BlockDebug { index: 2.into(), order: 1, block_type: BlockType::Free },
            BlockDebug { index: 4.into(), order: 2, block_type: BlockType::Free },
            BlockDebug { index: 8.into(), order: 3, block_type: BlockType::Free },
            BlockDebug { index: 16.into(), order: 4, block_type: BlockType::Free },
            BlockDebug { index: 32.into(), order: 5, block_type: BlockType::Free },
            BlockDebug { index: 64.into(), order: 6, block_type: BlockType::Free },
            BlockDebug { index: 128.into(), order: 7, block_type: BlockType::Free },
        ];
        validate(&expected, &heap);
        assert_eq!(*heap.container.block_at_unchecked::<Free>(128.into()).free_next_index(), 0);
        assert_eq!(heap.failed_allocations, 0);
    }

    // End-to-end allocate/free cycles: min-order allocations, LIFO reuse of
    // freed blocks, buddy merging back up to max order, and large allocations
    // after merges.
    #[fuchsia::test]
    fn allocate_and_free() {
        let (container, _storage) = Container::read_and_write(4096).unwrap();
        let mut heap = Heap::empty(container).unwrap();

        for i in 0..=5 {
            let block = heap.allocate_block(constants::MIN_ORDER_SIZE).unwrap();
            assert_eq!(*block, i);
        }

        assert!(heap.free_block(BlockIndex::from(2)).is_ok());
        assert!(heap.free_block(BlockIndex::from(4)).is_ok());
        assert!(heap.free_block(BlockIndex::from(0)).is_ok());

        // Freed blocks are reused in LIFO (free-list head) order.
        let b = heap.allocate_block(constants::MIN_ORDER_SIZE).unwrap();
        assert_eq!(*b, 0);
        let b = heap.allocate_block(constants::MIN_ORDER_SIZE).unwrap();
        assert_eq!(*b, 4);
        let b = heap.allocate_block(constants::MIN_ORDER_SIZE).unwrap();
        assert_eq!(*b, 2);

        assert!(heap.free_block(BlockIndex::from(4)).is_ok());
        assert!(heap.free_block(BlockIndex::from(2)).is_ok());
        assert!(heap.free_block(BlockIndex::from(3)).is_ok());
        assert!(heap.free_block(BlockIndex::from(5)).is_ok());

        let expected = [
            BlockDebug { index: 0.into(), order: 0, block_type: BlockType::Reserved },
            BlockDebug { index: 1.into(), order: 0, block_type: BlockType::Reserved },
            BlockDebug { index: 2.into(), order: 1, block_type: BlockType::Free },
            BlockDebug { index: 4.into(), order: 2, block_type: BlockType::Free },
            BlockDebug { index: 8.into(), order: 3, block_type: BlockType::Free },
            BlockDebug { index: 16.into(), order: 4, block_type: BlockType::Free },
            BlockDebug { index: 32.into(), order: 5, block_type: BlockType::Free },
            BlockDebug { index: 64.into(), order: 6, block_type: BlockType::Free },
            BlockDebug { index: 128.into(), order: 7, block_type: BlockType::Free },
        ];
        validate(&expected, &heap);
        assert!(heap.free_head_per_order.iter().enumerate().skip(2).all(|(i, &j)| (1 << i) == *j));
        let buffer = BackingBuffer::from(heap.bytes());
        assert!(
            BlockIterator::from(&buffer).skip(2).all(|b| *b
                .cast::<Free>()
                .unwrap()
                .free_next_index()
                == 0)
        );

        assert!(heap.free_block(BlockIndex::from(0)).is_ok());
        let b = heap.allocate_block(2048).unwrap();
        assert_eq!(*b, 128);

        // Freeing index 1 lets everything below 128 merge into one max-order
        // block at 0, which the next 2048-byte allocation takes.
        assert!(heap.free_block(BlockIndex::from(1)).is_ok());
        let b = heap.allocate_block(2048).unwrap();
        assert_eq!(*b, 0);

        let expected = [
            BlockDebug { index: 0.into(), order: 7, block_type: BlockType::Reserved },
            BlockDebug { index: 128.into(), order: 7, block_type: BlockType::Reserved },
        ];
        validate(&expected, &heap);

        assert!(heap.free_block(BlockIndex::from(0)).is_ok());
        let b = heap.allocate_block(1024).unwrap();
        assert_eq!(*b, 0);
        let b = heap.allocate_block(1024).unwrap();
        assert_eq!(*b, 64);
        assert!(heap.free_block(BlockIndex::from(0)).is_ok());
        assert!(heap.free_block(BlockIndex::from(64)).is_ok());

        let b = heap.allocate_block(2048).unwrap();
        assert_eq!(*b, 0);
        assert!(heap.free_block(BlockIndex::from(0)).is_ok());

        let expected = [
            BlockDebug { index: 0.into(), order: 7, block_type: BlockType::Free },
            BlockDebug { index: 128.into(), order: 7, block_type: BlockType::Reserved },
        ];
        validate(&expected, &heap);
        assert_eq!(*heap.free_head_per_order[7], 0);
        assert_eq!(*heap.container.block_at_unchecked::<Free>(0.into()).free_next_index(), 0);

        assert!(heap.free_block(BlockIndex::from(128)).is_ok());
        let expected = [
            BlockDebug { index: 0.into(), order: 7, block_type: BlockType::Free },
            BlockDebug { index: 128.into(), order: 7, block_type: BlockType::Free },
        ];
        validate(&expected, &heap);
        assert_eq!(*heap.free_head_per_order[7], 128);
        assert_eq!(*heap.container.block_at_unchecked::<Free>(0.into()).free_next_index(), 0);
        assert_eq!(*heap.container.block_at_unchecked::<Free>(128.into()).free_next_index(), 0);
        assert_eq!(heap.failed_allocations, 0);
    }

    // The allocated/deallocated counters are monotonic totals, not live counts.
    #[fuchsia::test]
    fn allocation_counters_work() {
        let (container, _storage) = Container::read_and_write(4096).unwrap();
        let mut heap = Heap::empty(container).unwrap();

        let block_count_to_allocate: usize = 50;
        for _ in 0..block_count_to_allocate {
            heap.allocate_block(constants::MIN_ORDER_SIZE).unwrap();
        }

        assert_eq!(heap.total_allocated_blocks(), block_count_to_allocate);

        let block_count_to_free: usize = 5;
        for i in 0..block_count_to_free {
            heap.free_block(BlockIndex::from(i as u32)).unwrap();
        }

        assert_eq!(heap.total_allocated_blocks(), block_count_to_allocate);
        assert_eq!(heap.total_deallocated_blocks(), block_count_to_free);

        for i in block_count_to_free..block_count_to_allocate {
            heap.free_block(BlockIndex::from(i as u32)).unwrap();
        }

        assert_eq!(heap.total_allocated_blocks(), block_count_to_allocate);
        assert_eq!(heap.total_deallocated_blocks(), block_count_to_allocate);
    }

    // Freeing buddies triggers cascading merges; a lone reserved block (index
    // 3) stops the cascade until it too is freed.
    #[fuchsia::test]
    fn allocate_merge() {
        let (container, _storage) = Container::read_and_write(4096).unwrap();
        let mut heap = Heap::empty(container).unwrap();
        for i in 0..=3 {
            let block = heap.allocate_block(constants::MIN_ORDER_SIZE).unwrap();
            assert_eq!(*block, i);
        }

        assert!(heap.free_block(BlockIndex::from(2)).is_ok());
        assert!(heap.free_block(BlockIndex::from(0)).is_ok());
        assert!(heap.free_block(BlockIndex::from(1)).is_ok());

        let expected = [
            BlockDebug { index: 0.into(), order: 1, block_type: BlockType::Free },
            BlockDebug { index: 2.into(), order: 0, block_type: BlockType::Free },
            BlockDebug { index: 3.into(), order: 0, block_type: BlockType::Reserved },
            BlockDebug { index: 4.into(), order: 2, block_type: BlockType::Free },
            BlockDebug { index: 8.into(), order: 3, block_type: BlockType::Free },
            BlockDebug { index: 16.into(), order: 4, block_type: BlockType::Free },
            BlockDebug { index: 32.into(), order: 5, block_type: BlockType::Free },
            BlockDebug { index: 64.into(), order: 6, block_type: BlockType::Free },
            BlockDebug { index: 128.into(), order: 7, block_type: BlockType::Free },
        ];
        validate(&expected, &heap);
        assert!(heap.free_head_per_order.iter().enumerate().skip(3).all(|(i, &j)| (1 << i) == *j));
        let buffer = BackingBuffer::from(heap.bytes());
        assert!(
            BlockIterator::from(&buffer).skip(3).all(|b| *b
                .cast::<Free>()
                .unwrap()
                .free_next_index()
                == 0)
        );
        assert_eq!(*heap.free_head_per_order[1], 0);
        assert_eq!(*heap.free_head_per_order[0], 2);
        assert_eq!(*heap.container.block_at_unchecked::<Free>(0.into()).free_next_index(), 0);
        assert_eq!(*heap.container.block_at_unchecked::<Free>(2.into()).free_next_index(), 0);

        // Freeing the last reserved block merges everything back to max order.
        assert!(heap.free_block(BlockIndex::from(3)).is_ok());
        let expected = [
            BlockDebug { index: 0.into(), order: 7, block_type: BlockType::Free },
            BlockDebug { index: 128.into(), order: 7, block_type: BlockType::Free },
        ];
        validate(&expected, &heap);
        assert_eq!(*heap.free_head_per_order[1], 0);
        assert_eq!(*heap.container.block_at_unchecked::<Free>(0.into()).free_next_index(), 128);
        assert_eq!(*heap.container.block_at_unchecked::<Free>(128.into()).free_next_index(), 0);
    }

    // Exhausting the first page grows the heap one page at a time up to the
    // container size.
    #[fuchsia::test]
    fn extend() {
        let (container, _storage) = Container::read_and_write(8 * 2048).unwrap();
        let mut heap = Heap::empty(container).unwrap();

        let b = heap.allocate_block(2048).unwrap();
        assert_eq!(*b, 0);
        let b = heap.allocate_block(2048).unwrap();
        assert_eq!(*b, 128);
        let b = heap.allocate_block(2048).unwrap();
        assert_eq!(*b, 256);

        let expected = [
            BlockDebug { index: 0.into(), order: 7, block_type: BlockType::Reserved },
            BlockDebug { index: 128.into(), order: 7, block_type: BlockType::Reserved },
            BlockDebug { index: 256.into(), order: 7, block_type: BlockType::Reserved },
            BlockDebug { index: 384.into(), order: 7, block_type: BlockType::Free },
        ];
        validate(&expected, &heap);
        assert_eq!(*heap.free_head_per_order[7], 384);
        assert_eq!(*heap.container.block_at_unchecked::<Free>(384.into()).free_next_index(), 0);

        let b = heap.allocate_block(2048).unwrap();
        assert_eq!(*b, 384);
        let b = heap.allocate_block(2048).unwrap();
        assert_eq!(*b, 512);

        assert!(heap.free_block(BlockIndex::from(0)).is_ok());
        assert!(heap.free_block(BlockIndex::from(128)).is_ok());
        assert!(heap.free_block(BlockIndex::from(256)).is_ok());
        assert!(heap.free_block(BlockIndex::from(384)).is_ok());
        assert!(heap.free_block(BlockIndex::from(512)).is_ok());

        let expected = [
            BlockDebug { index: 0.into(), order: 7, block_type: BlockType::Free },
            BlockDebug { index: 128.into(), order: 7, block_type: BlockType::Free },
            BlockDebug { index: 256.into(), order: 7, block_type: BlockType::Free },
            BlockDebug { index: 384.into(), order: 7, block_type: BlockType::Free },
            BlockDebug { index: 512.into(), order: 7, block_type: BlockType::Free },
            BlockDebug { index: 640.into(), order: 7, block_type: BlockType::Free },
        ];
        validate(&expected, &heap);
        assert_eq!(heap.current_size_bytes, 2048 * 4 + 4096);
        assert_eq!(*heap.free_head_per_order[7], 512);
        assert_eq!(*heap.container.block_at_unchecked::<Free>(512.into()).free_next_index(), 384);
        assert_eq!(*heap.container.block_at_unchecked::<Free>(384.into()).free_next_index(), 256);
        assert_eq!(*heap.container.block_at_unchecked::<Free>(256.into()).free_next_index(), 128);
        assert_eq!(*heap.container.block_at_unchecked::<Free>(128.into()).free_next_index(), 0);
        assert_eq!(*heap.container.block_at_unchecked::<Free>(0.into()).free_next_index(), 640);
        assert_eq!(*heap.container.block_at_unchecked::<Free>(640.into()).free_next_index(), 0);
        assert_eq!(heap.failed_allocations, 0);
    }

    // Once the container is exhausted, further allocations fail and each
    // failure increments failed_allocations.
    #[fuchsia::test]
    fn extend_error() {
        let (container, _storage) = Container::read_and_write(4 * 2048).unwrap();
        let mut heap = Heap::empty(container).unwrap();

        let b = heap.allocate_block(2048).unwrap();
        assert_eq!(*b, 0);
        let b = heap.allocate_block(2048).unwrap();
        assert_eq!(*b, 128);
        let b = heap.allocate_block(2048).unwrap();
        assert_eq!(*b, 256);

        let expected = [
            BlockDebug { index: 0.into(), order: 7, block_type: BlockType::Reserved },
            BlockDebug { index: 128.into(), order: 7, block_type: BlockType::Reserved },
            BlockDebug { index: 256.into(), order: 7, block_type: BlockType::Reserved },
            BlockDebug { index: 384.into(), order: 7, block_type: BlockType::Free },
        ];
        validate(&expected, &heap);

        let b = heap.allocate_block(2048).unwrap();
        assert_eq!(*b, 384);
        assert_eq!(heap.failed_allocations, 0);
        assert!(heap.allocate_block(2048).is_err());
        assert_eq!(heap.failed_allocations, 1);
        assert!(heap.allocate_block(2048).is_err());
        assert_eq!(heap.failed_allocations, 2);

        assert!(heap.free_block(BlockIndex::from(0)).is_ok());
        assert!(heap.free_block(BlockIndex::from(128)).is_ok());
        assert!(heap.free_block(BlockIndex::from(256)).is_ok());
        assert!(heap.free_block(BlockIndex::from(384)).is_ok());

        let expected = [
            BlockDebug { index: 0.into(), order: 7, block_type: BlockType::Free },
            BlockDebug { index: 128.into(), order: 7, block_type: BlockType::Free },
            BlockDebug { index: 256.into(), order: 7, block_type: BlockType::Free },
            BlockDebug { index: 384.into(), order: 7, block_type: BlockType::Free },
        ];
        validate(&expected, &heap);
    }

    // The heap caps at MAX_VMO_SIZE even when the container is larger.
    #[fuchsia::test]
    fn extend_vmo_greater_max_size() {
        let (container, _storage) =
            Container::read_and_write(constants::MAX_VMO_SIZE + 2048).unwrap();
        let mut heap = Heap::empty(container).unwrap();

        for n in 0_u32..(constants::MAX_VMO_SIZE / constants::MAX_ORDER_SIZE).try_into().unwrap() {
            let b = heap.allocate_block(2048).unwrap();
            assert_eq!(*b, n * 128);
        }
        assert_eq!(heap.failed_allocations, 0);
        assert!(heap.allocate_block(2048).is_err());
        assert_eq!(heap.failed_allocations, 1);

        for n in 0_u32..(constants::MAX_VMO_SIZE / constants::MAX_ORDER_SIZE).try_into().unwrap() {
            assert!(heap.free_block(BlockIndex::from(n * 128)).is_ok());
        }
    }

    // Garbage written into the upper half of a freed order-1 block must not
    // be misread as block metadata when that half is handed out again.
    #[fuchsia::test]
    fn dont_reinterpret_upper_block_contents() {
        let (container, _storage) = Container::read_and_write(4096).unwrap();
        let mut heap = Heap::empty(container).unwrap();

        assert_eq!(*heap.allocate_block(constants::MIN_ORDER_SIZE).unwrap(), 0);
        let b1 = heap.allocate_block(utils::order_to_size(1)).unwrap();
        assert_eq!(*b1, 2);
        assert_eq!(*heap.allocate_block(utils::order_to_size(1)).unwrap(), 4);

        {
            // Scribble over the second half of b1 (index 3) with all-ones.
            let mut block = heap.container.block_at_mut(3.into());
            block_testing::override_header(&mut block, 0xffffffff);
            block_testing::override_payload(&mut block, 0xffffffff);
        }

        assert!(heap.free_block(b1).is_ok());

        assert_eq!(*heap.allocate_block(constants::MIN_ORDER_SIZE).unwrap(), 1);
        assert_eq!(*heap.allocate_block(constants::MIN_ORDER_SIZE).unwrap(), 2);

        assert_eq!(*heap.allocate_block(constants::MIN_ORDER_SIZE).unwrap(), 3);

        let expected = [
            BlockDebug { index: 0.into(), order: 0, block_type: BlockType::Reserved },
            BlockDebug { index: 1.into(), order: 0, block_type: BlockType::Reserved },
            BlockDebug { index: 2.into(), order: 0, block_type: BlockType::Reserved },
            BlockDebug { index: 3.into(), order: 0, block_type: BlockType::Reserved },
            BlockDebug { index: 4.into(), order: 1, block_type: BlockType::Reserved },
            BlockDebug { index: 6.into(), order: 1, block_type: BlockType::Free },
            BlockDebug { index: 8.into(), order: 3, block_type: BlockType::Free },
            BlockDebug { index: 16.into(), order: 4, block_type: BlockType::Free },
            BlockDebug { index: 32.into(), order: 5, block_type: BlockType::Free },
            BlockDebug { index: 64.into(), order: 6, block_type: BlockType::Free },
            BlockDebug { index: 128.into(), order: 7, block_type: BlockType::Free },
        ];
        validate(&expected, &heap);
    }

    // The header's recorded VMO size tracks current_size() across every grow.
    #[fuchsia::test]
    fn update_header_vmo_size() {
        let (container, _storage) = Container::read_and_write(3 * 4096).unwrap();
        let mut heap = Heap::new(container).unwrap();
        assert_eq!(
            heap.container
                .block_at_unchecked::<Header>(BlockIndex::HEADER)
                .vmo_size()
                .unwrap()
                .unwrap() as usize,
            heap.current_size()
        );
        let b = heap.allocate_block(2048).unwrap();
        assert_eq!(*b, 128);
        assert_eq!(
            heap.container
                .block_at_unchecked::<Header>(BlockIndex::HEADER)
                .vmo_size()
                .unwrap()
                .unwrap() as usize,
            heap.current_size()
        );
        let b = heap.allocate_block(2048).unwrap();
        assert_eq!(*b, 256);
        assert_eq!(
            heap.container
                .block_at_unchecked::<Header>(BlockIndex::HEADER)
                .vmo_size()
                .unwrap()
                .unwrap() as usize,
            heap.current_size()
        );
        let b = heap.allocate_block(2048).unwrap();
        assert_eq!(*b, 384);
        assert_eq!(
            heap.container
                .block_at_unchecked::<Header>(BlockIndex::HEADER)
                .vmo_size()
                .unwrap()
                .unwrap() as usize,
            heap.current_size()
        );

        let expected = [
            BlockDebug { index: 0.into(), order: 1, block_type: BlockType::Header },
            BlockDebug { index: 2.into(), order: 1, block_type: BlockType::Free },
            BlockDebug { index: 4.into(), order: 2, block_type: BlockType::Free },
            BlockDebug { index: 8.into(), order: 3, block_type: BlockType::Free },
            BlockDebug { index: 16.into(), order: 4, block_type: BlockType::Free },
            BlockDebug { index: 32.into(), order: 5, block_type: BlockType::Free },
            BlockDebug { index: 64.into(), order: 6, block_type: BlockType::Free },
            BlockDebug { index: 128.into(), order: 7, block_type: BlockType::Reserved },
            BlockDebug { index: 256.into(), order: 7, block_type: BlockType::Reserved },
            BlockDebug { index: 384.into(), order: 7, block_type: BlockType::Reserved },
        ];
        validate(&expected, &heap);

        let b = heap.allocate_block(2048).unwrap();
        assert_eq!(*b, 512);
        assert_eq!(
            heap.container
                .block_at_unchecked::<Header>(BlockIndex::HEADER)
                .vmo_size()
                .unwrap()
                .unwrap() as usize,
            heap.current_size()
        );
        let b = heap.allocate_block(2048).unwrap();
        assert_eq!(*b, 640);
        assert_eq!(
            heap.container
                .block_at_unchecked::<Header>(BlockIndex::HEADER)
                .vmo_size()
                .unwrap()
                .unwrap() as usize,
            heap.current_size()
        );
        assert_eq!(heap.failed_allocations, 0);
        // A failed grow must leave the recorded size consistent.
        assert!(heap.allocate_block(2048).is_err());
        assert_eq!(
            heap.container
                .block_at_unchecked::<Header>(BlockIndex::HEADER)
                .vmo_size()
                .unwrap()
                .unwrap() as usize,
            heap.current_size()
        );
        assert_eq!(heap.failed_allocations, 1);

        assert!(heap.free_block(BlockIndex::from(128)).is_ok());
        assert!(heap.free_block(BlockIndex::from(256)).is_ok());
        assert!(heap.free_block(BlockIndex::from(384)).is_ok());
        assert!(heap.free_block(BlockIndex::from(512)).is_ok());
        assert!(heap.free_block(BlockIndex::from(640)).is_ok());
        assert_eq!(
            heap.container
                .block_at_unchecked::<Header>(BlockIndex::HEADER)
                .vmo_size()
                .unwrap()
                .unwrap() as usize,
            heap.current_size()
        );

        assert!(heap.free_block(BlockIndex::HEADER).is_ok());

        let expected = [
            BlockDebug { index: 0.into(), order: 7, block_type: BlockType::Free },
            BlockDebug { index: 128.into(), order: 7, block_type: BlockType::Free },
            BlockDebug { index: 256.into(), order: 7, block_type: BlockType::Free },
            BlockDebug { index: 384.into(), order: 7, block_type: BlockType::Free },
            BlockDebug { index: 512.into(), order: 7, block_type: BlockType::Free },
            BlockDebug { index: 640.into(), order: 7, block_type: BlockType::Free },
        ];
        validate(&expected, &heap);
    }
}
790}