//! Best-fit allocator — osiris/mem/alloc/bestfit.rs

1use core::{ops::Range, ptr::NonNull};
2
3use hal::mem::PhysAddr;
4
5use crate::error::Result;
6
/// The metadata header that sits in-line directly before every block managed by
/// the BestFitAllocator (at the block's control pointer).
struct BestFitMeta {
    /// The size of the user portion of the block in bytes (header and
    /// alignment padding excluded).
    size: usize,
    /// The control pointer of the next free block. This is `None` if the block
    /// is allocated, or if this is the last block in the free list.
    next: Option<NonNull<u8>>,
}
14
/// This is an allocator implementation that uses the best fit strategy.
/// That does mean, when we allocate a block, we try to find the smallest block that fits the requested size.
/// Blocks are stored in a singly linked list. The important part is that the linked list is stored in-line with the memory blocks.
/// This means that every block has a header that contains the size of the block and a pointer to the next block.
#[proc_macros::fmt]
pub struct BestFitAllocator {
    /// Head of the free block list. Stores the control pointer (the address of
    /// the block's `BestFitMeta` header), not the user pointer.
    head: Option<NonNull<u8>>,
}
24
/// Implementation of the BestFitAllocator.
impl BestFitAllocator {
    /// The minimum number of bytes a range passed to `add_range` must provide:
    /// one metadata header, the alignment padding after it, and at least one
    /// usable byte.
    pub const MIN_RANGE_SIZE: usize = size_of::<BestFitMeta>() + Self::align_up() + 1;

    /// Creates a new BestFitAllocator.
    ///
    /// Returns the new BestFitAllocator with an empty free list.
    pub const fn new() -> Self {
        Self { head: None }
    }

    /// Adds a range of memory to the allocator.
    ///
    /// `range` - The range of memory to add.
    ///
    /// Returns `Ok(())` if the range was added successfully, otherwise an error.
    ///
    /// # Safety
    ///
    /// The range must be valid, 128bit aligned and must not overlap with any other current or future range.
    /// The range must also be at least as large as `MIN_RANGE_SIZE`.
    /// Also the range must stay valid for the whole lifetime of the allocator. Also the lifetime of any allocation is only valid as long as the allocator is valid.
    pub unsafe fn add_range(&mut self, range: &Range<PhysAddr>) -> Result<()> {
        let ptr = range.start;

        // Check if the pointer is 128bit aligned.
        if !ptr.is_multiple_of(align_of::<u128>()) {
            return Err(kerr!(InvalidArgument));
        }

        // Reject ranges too small to hold the header, padding and one usable byte.
        if range.end.diff(range.start) < Self::MIN_RANGE_SIZE {
            return Err(kerr!(InvalidArgument));
        }

        debug_assert!(range.end > range.start);
        debug_assert!(range.end.diff(range.start) > size_of::<BestFitMeta>() + Self::align_up());
        debug_assert!(range.end.as_usize() <= isize::MAX as usize);

        // The user pointer is the pointer to the user memory. So we need to add the size of the meta data and possibly add padding.
        let user_pointer = ptr + size_of::<BestFitMeta>() + Self::align_up();

        // Set the current head as the next block, so we can add the new block to the head.
        // `size` is the user portion only: everything between the user pointer and range end.
        let meta = BestFitMeta {
            size: range.end.diff(user_pointer),
            next: self.head,
        };

        // Write the header to the memory.
        unsafe { core::ptr::write(ptr.as_mut_ptr::<BestFitMeta>(), meta) };

        // Set the head to the new block (head stores the control pointer).
        self.head = Some(unsafe { NonNull::new_unchecked(ptr.as_mut_ptr::<u8>()) });
        Ok(())
    }

    /// Calculates the padding required after the metadata header so the user
    /// pointer is 128bit aligned. Note: We only align to 128bit.
    ///
    /// Returns the padding in bytes.
    const fn align_up() -> usize {
        let meta = size_of::<BestFitMeta>();
        let align = align_of::<u128>();
        // Calculate the padding required to align the block.
        (align - (meta % align)) % align
    }

    /// Selects the best fit block for the given size.
    ///
    /// `size` - The size of the block.
    /// `requested` - If set, only a block containing `[requested, requested + size)` qualifies.
    ///
    /// Returns the control pointer to the block and the control pointer to the previous block.
    fn select_block(
        &mut self,
        size: usize,
        requested: Option<PhysAddr>,
    ) -> Result<(NonNull<u8>, Option<NonNull<u8>>)> {
        let mut best_fit = Err(kerr!(OutOfMemory));
        let mut best_fit_size = usize::MAX;

        let mut current = self.head;
        let mut prev = None;

        // If a specific address was requested, scan for the (unique, since
        // ranges never overlap) free block that contains it. Note: on failure
        // this scan leaves `current` at None, so the best-fit loop below finds
        // nothing and we return OutOfMemory — a failed request never falls back
        // to an arbitrary block.
        if let Some(requested) = requested {
            while let Some(ptr) = current {
                // Get the metadata of the block.
                let meta = unsafe { ptr.cast::<BestFitMeta>().as_ref() };

                if unsafe { Self::contains(meta, requested, size) } {
                    return Ok((ptr, prev));
                }

                // Move to the next block.
                prev = current;
                current = meta.next;
            }
        }

        // Iterate over all blocks and find the best fit.
        while let Some(ptr) = current {
            // Get the metadata of the block.
            let meta = unsafe { ptr.cast::<BestFitMeta>().as_ref() };

            // Check if the block is big enough and smaller than the current best fit.
            // `<=` means a later block of equal size replaces an earlier candidate.
            if meta.size >= size && meta.size <= best_fit_size {
                best_fit = Ok((ptr, prev));
                best_fit_size = meta.size;
            }

            // Move to the next block.
            prev = current;
            current = meta.next;
        }

        best_fit
    }

    /// Calculates the user pointer from the control pointer.
    ///
    /// `ptr` - The control pointer.
    ///
    /// Returns the user pointer.
    ///
    /// # Safety
    ///
    /// The ptr must be a valid control pointer. Note: After the allocator which allocated the pointer is dropped, the control pointer is always considered invalid.
    unsafe fn user_ptr(ptr: NonNull<u8>) -> NonNull<u8> {
        // Guard against the offset arithmetic leaving the provenance range.
        debug_assert!(
            (ptr.as_ptr() as usize)
                <= isize::MAX as usize - size_of::<BestFitMeta>() - Self::align_up()
        );
        unsafe { ptr.byte_add(size_of::<BestFitMeta>() + Self::align_up()) }
    }

    /// Calculates the control pointer from the user pointer.
    ///
    /// `ptr` - The user pointer.
    ///
    /// Returns the control pointer.
    ///
    /// # Safety
    ///
    /// The ptr must be a valid user pointer. Note: After the allocator which allocated the pointer is dropped, the user pointer is always considered invalid.
    unsafe fn control_ptr(ptr: NonNull<u8>) -> NonNull<u8> {
        // The subtraction must not wrap below the start of the address space.
        debug_assert!((ptr.as_ptr() as usize) > size_of::<BestFitMeta>() + Self::align_up());
        unsafe { ptr.byte_sub(size_of::<BestFitMeta>() + Self::align_up()) }
    }

    /// Checks whether the user portion of the block described by `meta` fully
    /// contains the address range `[target, target + size)`.
    ///
    /// `meta` - The metadata header of the block to check.
    /// `target` - The start address of the requested range.
    /// `size` - The length of the requested range in bytes. Must not be 0.
    ///
    /// Returns `true` if the range fits completely inside the block's user memory.
    ///
    /// # Safety
    ///
    /// `meta` must reference a metadata header that lives in-line in a range
    /// owned by the allocator, so that the derived user pointer is valid.
    unsafe fn contains(meta: &BestFitMeta, target: PhysAddr, size: usize) -> bool {
        // The address of the header itself is the control pointer; derive the
        // user pointer (start of user memory) from it.
        let begin = unsafe {
            Self::user_ptr(NonNull::new_unchecked(
                meta as *const BestFitMeta as *mut u8,
            ))
        };
        debug_assert!(size > 0);

        if target >= begin.into() {
            // Reject if `target + size` overflows or extends past the block end.
            if let Some(target) = target.checked_add(size) {
                if target > (unsafe { begin.add(meta.size) }).into() {
                    return false;
                }
            } else {
                return false;
            }
            return true;
        }
        false
    }
}
192
193/// Implementation of the Allocator trait for BestFitAllocator.
194impl super::Allocator for BestFitAllocator {
195    /// Allocates a block of memory with the given size and alignment. Note: This function will always yield an invalid align for align > 128bit.
196    ///
197    /// `size` - The size of the block.
198    /// `align` - The alignment of the block.
199    ///
200    /// Returns the user pointer to the block if successful, otherwise an error.
201    fn malloc<T>(
202        &mut self,
203        size: usize,
204        align: usize,
205        request: Option<PhysAddr>,
206    ) -> Result<NonNull<T>> {
207        // Check if the alignment is valid.
208        if align == 0 || align > align_of::<u128>() {
209            return Err(kerr!(InvalidAlign));
210        }
211
212        if let Some(request) = request {
213            if !request.is_multiple_of(align) {
214                return Err(kerr!(InvalidAlign));
215            }
216        }
217
218        // Check if the size is valid.
219        if size == 0 {
220            return Err(kerr!(InvalidArgument));
221        }
222
223        // For some cfg this warning is correct. But for others its not.
224        #[allow(clippy::absurd_extreme_comparisons)]
225        if size >= super::MAX_ADDR {
226            return Err(kerr!(InvalidArgument));
227        }
228
229        // Align the size.
230        let aligned_size = super::super::align_up(size);
231        debug_assert!(aligned_size >= size);
232        debug_assert!(aligned_size <= isize::MAX as usize);
233
234        // Find the best fit block.
235        let (split, block, prev) = match self.select_block(aligned_size, request) {
236            Ok((block, prev)) => {
237                // Get the metadata of the block.
238                let meta = unsafe { block.cast::<BestFitMeta>().as_mut() };
239
240                // If we requested a specific address. The size must be extended by the offset from block start to the requested address.
241                let aligned_size = if let Some(request) = request {
242                    aligned_size + request.diff(unsafe { Self::user_ptr(block) }.into())
243                } else {
244                    aligned_size
245                };
246
247                // Calculate the amount of bytes until the beginning of the possibly next metadata.
248                let min = aligned_size.saturating_add(size_of::<BestFitMeta>() + Self::align_up());
249
250                debug_assert!(
251                    (block.as_ptr() as usize)
252                        <= isize::MAX as usize
253                            - meta.size
254                            - size_of::<BestFitMeta>()
255                            - Self::align_up()
256                );
257
258                debug_assert!(
259                    meta.size < isize::MAX as usize - size_of::<BestFitMeta>() - Self::align_up()
260                );
261
262                // If the block is big enough to split. Then it also needs to be big enough to store the metadata + align of the next block.
263                if meta.size > min {
264                    // Calculate the remaining size of the block and thus the next metadata.
265                    let remaining_meta = BestFitMeta {
266                        size: meta.size - min,
267                        next: meta.next,
268                    };
269
270                    // Shrink the current block to the requested aligned_size + padding (which is not available to the user).
271                    meta.size = aligned_size;
272
273                    // Calculate the pointer to the next metadata.
274                    let ptr = unsafe { Self::user_ptr(block).byte_add(aligned_size) };
275
276                    unsafe {
277                        // Write the new metadata to the memory.
278                        ptr.cast::<BestFitMeta>().write(remaining_meta);
279                    }
280
281                    // If there is a previous block, we insert the new block after it. Otherwise we set it as the new head.
282                    if let Some(prev) = prev {
283                        let prev_meta = unsafe { prev.cast::<BestFitMeta>().as_mut() };
284                        prev_meta.next = Some(ptr);
285                    } else {
286                        self.head = Some(ptr);
287                    }
288
289                    // The next block of an allocated block is always None.
290                    meta.next = None;
291
292                    (true, block, prev)
293                } else {
294                    (false, block, prev)
295                }
296            }
297            Err(_) => {
298                let (block, prev) = self.select_block(size, request)?;
299                (false, block, prev)
300            }
301        };
302
303        if !split {
304            // Get the metadata of the block.
305            let meta = unsafe { block.cast::<BestFitMeta>().as_mut() };
306
307            if let Some(prev) = prev {
308                let prev_meta = unsafe { prev.cast::<BestFitMeta>().as_mut() };
309                // If there is a previous block, we remove the current block from the list. Ie. we set the next block of the previous block to the next block of the current block.
310                prev_meta.next = meta.next;
311            } else {
312                // If there is no previous block, we set the next block as the new head.
313                self.head = meta.next;
314            }
315
316            // The next block of an allocated block is always None.
317            meta.next = None;
318        }
319
320        if let Some(request) = request {
321            debug_assert!(unsafe {
322                Self::contains(block.cast::<BestFitMeta>().as_ref(), request, size)
323            });
324        }
325
326        // Return the user pointer.
327        Ok(unsafe { Self::user_ptr(block).cast() })
328    }
329
330    /// Frees a block of memory.
331    ///
332    /// `ptr` - The pointer to the block.
333    /// `size` - The size of the block. (This is used to check if the size of the block is correct.)
334    unsafe fn free<T>(&mut self, ptr: NonNull<T>, size: usize) {
335        let block = unsafe { Self::control_ptr(ptr.cast()) };
336        let meta = unsafe { block.cast::<BestFitMeta>().as_mut() };
337
338        // The next block of a free block is always the current head. We essentially insert the block at the beginning of the list.
339        meta.next = self.head;
340
341        // Check if the size of the block is correct.
342        bug_on!(
343            size > meta.size,
344            "allocation size {} is larger than block size {}",
345            size,
346            meta.size
347        );
348
349        // Set the block as the new head.
350        self.head = Some(block);
351    }
352}
353
354// TESTING ------------------------------------------------------------------------------------------------------------
355
#[cfg(test)]
mod tests {
    use crate::error::Kind;
    use crate::mem::align_up;

    use super::super::*;
    use super::*;

    /// Checks the header of an allocated block: it must be at least `size`
    /// bytes large and its `next` pointer must equal `next`.
    fn verify_block(user_ptr: NonNull<u8>, size: usize, next: Option<NonNull<u8>>) {
        let control_ptr = unsafe { BestFitAllocator::control_ptr(user_ptr) };
        let meta = unsafe { control_ptr.cast::<BestFitMeta>().as_ref() };

        // Blocks may be larger than requested (size alignment), never smaller.
        assert!(meta.size >= size);
        assert_eq!(meta.next, next);
    }

    /// Asserts that no two user ranges in `ptrs` ((pointer, size) pairs)
    /// overlap and that every range is non-degenerate.
    fn verify_ptrs_not_overlaping(ptrs: &[(NonNull<u8>, usize)]) {
        for (i, (ptr1, size1)) in ptrs.iter().enumerate() {
            for (j, (ptr2, size2)) in ptrs.iter().enumerate() {
                if i == j {
                    continue;
                }

                let begin1 = ptr1.as_ptr() as usize;
                let end1 = begin1 + size1;
                let begin2 = ptr2.as_ptr() as usize;
                let end2 = begin2 + size2;

                // The two ranges must be disjoint.
                assert!(end1 <= begin2 || end2 <= begin1);
                assert!(begin1 != begin2);
                assert!(end1 != end2);
                assert!(*size1 > 0);
                assert!(*size2 > 0);
                assert!(end1 > begin1);
                assert!(end2 > begin2);
            }
        }
    }

    /// Allocates `length` bytes of 128bit aligned host memory and returns it
    /// as a physical address range. The memory is never deallocated here; it
    /// has to stay valid for the lifetime of the allocator under test.
    fn alloc_range(length: usize) -> Range<PhysAddr> {
        let alloc_range = std::alloc::Layout::from_size_align(length, align_of::<u128>()).unwrap();
        let ptr = unsafe { std::alloc::alloc(alloc_range) };
        PhysAddr::new(ptr as usize)..PhysAddr::new(ptr as usize + length)
    }

    /// A single allocation from a fresh range must succeed and be well-formed.
    #[test]
    fn allocate_one() {
        let mut allocator = BestFitAllocator::new();

        let range = alloc_range(4096);
        unsafe {
            allocator.add_range(&range).unwrap();
        }

        let ptr = allocator.malloc(128, 1, None).unwrap();

        verify_block(ptr, 128, None);
    }

    /// Allocating with a specific requested address must return a block that
    /// contains the requested range.
    #[test]
    fn alloc_request() {
        let mut allocator = BestFitAllocator::new();

        let range = alloc_range(4096);
        unsafe {
            allocator.add_range(&range).unwrap();
        }

        let request = range.start + 128;
        let ptr = allocator.malloc::<u8>(128, 1, Some(request)).unwrap();

        // Check that the returned pointer contains the requested address.
        let meta = unsafe {
            BestFitAllocator::control_ptr(ptr)
                .cast::<BestFitMeta>()
                .as_ref()
        };
        assert!(unsafe { BestFitAllocator::contains(meta, request, 128) });
    }

    /// A requested range that extends past the end of the managed memory must
    /// fail with OutOfMemory.
    #[test]
    fn alloc_request_to_big() {
        let mut allocator = BestFitAllocator::new();

        let range = alloc_range(4096);
        unsafe {
            allocator.add_range(&range).unwrap();
        }

        let request = range.start + 4096;
        let ptr = allocator.malloc::<u8>(128, 1, Some(request));

        assert!(ptr.is_err_and(|e| e.kind == Kind::OutOfMemory));
    }

    /// A requested address that does not satisfy the requested alignment must
    /// fail with InvalidAlign.
    #[test]
    fn alloc_request_not_aligned() {
        let mut allocator = BestFitAllocator::new();

        let range = alloc_range(4096);
        unsafe {
            allocator.add_range(&range).unwrap();
        }

        let request = range.start + 127;
        let ptr = allocator.malloc::<u8>(128, 8, Some(request));

        assert!(ptr.is_err_and(|e| e.kind == Kind::InvalidAlign));
    }

    /// Requesting the same address twice must fail the second time, since the
    /// containing block is no longer free.
    #[test]
    fn alloc_request_not_available() {
        let mut allocator = BestFitAllocator::new();

        let range = alloc_range(4096);
        unsafe {
            allocator.add_range(&range).unwrap();
        }

        let request = range.start + 128;
        let ptr = allocator.malloc::<u8>(128, 1, Some(request)).unwrap();
        verify_block(ptr, 128, None);

        let ptr = allocator.malloc::<u8>(128, 1, Some(request));
        assert!(ptr.is_err_and(|e| e.kind == Kind::OutOfMemory));
    }

    /// A requested address completely outside every managed range must fail
    /// with OutOfMemory.
    #[test]
    fn alloc_request_out_of_range() {
        let mut allocator = BestFitAllocator::new();

        let range = alloc_range(4096);
        unsafe {
            allocator.add_range(&range).unwrap();
        }

        let request = range.end + 128;
        let ptr = allocator.malloc::<u8>(128, 1, Some(request));

        assert!(ptr.is_err_and(|e| e.kind == Kind::OutOfMemory));
    }

    /// Many allocations from one generously sized range must all succeed and
    /// must not overlap each other.
    #[test]
    fn alloc_alot() {
        let mut allocator = BestFitAllocator::new();
        const CNT: usize = 100;
        const SIZE: usize = 128;

        let range = alloc_range(SIZE * CNT * 2);
        unsafe {
            allocator.add_range(&range).unwrap();
        }

        let mut ptrs = Vec::new();
        for _ in 0..CNT {
            let ptr = allocator.malloc(SIZE, 1, None).unwrap();
            verify_block(ptr, SIZE, None);
            ptrs.push((ptr, SIZE));
        }

        verify_ptrs_not_overlaping(ptrs.as_slice());
    }

    /// The range is sized to fit exactly CNT allocations including their
    /// per-block overhead; all CNT allocations must succeed.
    #[test]
    fn alloc_exact() {
        let mut allocator = BestFitAllocator::new();
        const CNT: usize = 10;
        const SIZE: usize = 128;

        let range =
            alloc_range((SIZE + size_of::<BestFitMeta>() + BestFitAllocator::align_up()) * CNT);
        unsafe {
            allocator.add_range(&range).unwrap();
        }

        let mut ptrs = Vec::new();
        for _ in 0..CNT {
            let ptr = allocator.malloc(SIZE, 1, None).unwrap();
            verify_block(ptr, SIZE, None);
            ptrs.push((ptr, SIZE));
        }

        verify_ptrs_not_overlaping(ptrs.as_slice());
    }

    /// The range is one byte too small for CNT allocations; the last
    /// allocation must fail with OutOfMemory.
    #[test]
    fn alloc_oom() {
        let mut allocator = BestFitAllocator::new();
        const CNT: usize = 10;
        const SIZE: usize = 128;

        let range =
            alloc_range((SIZE + size_of::<BestFitMeta>() + BestFitAllocator::align_up()) * CNT - 1);
        unsafe {
            allocator.add_range(&range).unwrap();
        }

        let mut ptrs = Vec::new();
        for _ in 0..CNT - 1 {
            let ptr = allocator.malloc(SIZE, 1, None).unwrap();
            verify_block(ptr, SIZE, None);
            ptrs.push(ptr);
        }

        let ptr = allocator.malloc::<u8>(SIZE, 1, None);
        assert!(ptr.is_err_and(|e| e.kind == Kind::OutOfMemory));
    }

    /// Freeing the only block must make its memory allocatable again.
    #[test]
    fn alloc_no_oom_through_free() {
        let mut allocator = BestFitAllocator::new();
        const SIZE: usize = 128;

        let range = alloc_range(SIZE + size_of::<BestFitMeta>() + BestFitAllocator::align_up());
        unsafe {
            allocator.add_range(&range).unwrap();
        }

        let ptr = allocator.malloc(SIZE, 1, None).unwrap();
        verify_block(ptr, SIZE, None);

        unsafe {
            allocator.free(ptr, SIZE);
        }

        let ptr = allocator.malloc(SIZE, 1, None).unwrap();
        verify_block(ptr, SIZE, None);
    }

    /// Allocations must succeed across multiple independently added ranges
    /// (one allocation per range) without overlapping.
    #[test]
    fn multi_range_alloc() {
        let mut allocator = BestFitAllocator::new();
        const CNT: usize = 10;
        const SIZE: usize = 128;

        let mut ranges = Vec::new();
        for _ in 0..CNT {
            let range = alloc_range(SIZE + size_of::<BestFitMeta>() + BestFitAllocator::align_up());
            unsafe {
                allocator.add_range(&range).unwrap();
            }
            ranges.push(range);
        }

        let mut ptrs = Vec::new();
        for _ in 0..CNT {
            let ptr = allocator.malloc(SIZE, 1, None).unwrap();
            verify_block(ptr, SIZE, None);
            ptrs.push((ptr, SIZE));
        }

        verify_ptrs_not_overlaping(ptrs.as_slice());
    }

    /// Exhausts all ranges, frees one allocation, and checks that exactly one
    /// further allocation then succeeds without overlaps.
    #[test]
    fn multi_range_no_oom_through_free() {
        // This function allocates multiple ranges and then frees one of them randomly. And only then there is no oom.
        let mut allocator = BestFitAllocator::new();

        const CNT: usize = 10;
        const SIZE: usize = 128;

        let mut ranges = Vec::new();
        for _ in 0..CNT {
            let range = alloc_range(SIZE + size_of::<BestFitMeta>() + BestFitAllocator::align_up());
            unsafe {
                allocator.add_range(&range).unwrap();
            }
            ranges.push(range);
        }

        let mut ptrs = Vec::new();

        let ptr = allocator.malloc::<u8>(SIZE, 1, None).unwrap();

        for _ in 0..CNT - 1 {
            let ptr = allocator.malloc(SIZE, 1, None).unwrap();
            verify_block(ptr, SIZE, None);
            ptrs.push((ptr, SIZE));
        }

        unsafe {
            allocator.free(ptr, SIZE);
        }

        let ptr = allocator.malloc(SIZE, 1, None).unwrap();
        ptrs.push((ptr, SIZE));

        verify_ptrs_not_overlaping(ptrs.as_slice());
    }

    /// Regression test: a size that rounds up (17 -> 32) must survive a
    /// malloc/free/malloc/free cycle without corrupting the block header.
    #[test]
    fn free_corrupts_metadata() {
        let mut allocator = BestFitAllocator::new();
        const SIZE: usize = 17;
        const ALIGNED: usize = 32;
        assert!(align_up(SIZE) == ALIGNED);

        let range = alloc_range(ALIGNED + size_of::<BestFitMeta>() + BestFitAllocator::align_up());
        unsafe {
            allocator.add_range(&range).unwrap();
        }

        let ptr1: core::ptr::NonNull<u8> = allocator.malloc(SIZE, 1, None).unwrap();

        unsafe {
            allocator.free(ptr1, SIZE);
        }

        let ptr2: core::ptr::NonNull<u8> = allocator
            .malloc(SIZE, 1, None)
            .expect("second malloc should succeed via fallback path");

        unsafe {
            allocator.free(ptr2, SIZE);
        }
    }

    /// Exhausts all ranges completely; the next allocation must then fail with
    /// OutOfMemory and the handed-out blocks must not overlap.
    #[test]
    fn multi_range_oom() {
        let mut allocator = BestFitAllocator::new();

        const CNT: usize = 10;
        const SIZE: usize = 128;

        let mut ranges = Vec::new();
        for _ in 0..CNT {
            let range = alloc_range(SIZE + size_of::<BestFitMeta>() + BestFitAllocator::align_up());
            unsafe {
                allocator.add_range(&range).unwrap();
            }
            ranges.push(range);
        }

        let mut ptrs = Vec::new();

        for _ in 0..CNT {
            let ptr = allocator.malloc(SIZE, 1, None).unwrap();
            verify_block(ptr, SIZE, None);
            ptrs.push((ptr, SIZE));
        }

        let ptr = allocator.malloc::<u8>(SIZE, 1, None);
        assert!(ptr.is_err_and(|e| e.kind == Kind::OutOfMemory));

        verify_ptrs_not_overlaping(ptrs.as_slice());
    }
}
705
706// END TESTING --------------------------------------------------------------------------------------------------------
707
708// VERIFICATION -------------------------------------------------------------------------------------------------------
#[cfg(kani)]
mod verification {
    use super::*;
    use crate::mem::alloc::Allocator;
    use crate::mem::alloc::MAX_ADDR;

    /// Checks the header of an allocated block (mirrors the unit-test helper):
    /// at least `size` bytes large, `next` pointer equal to `next`.
    fn verify_block(user_ptr: NonNull<u8>, size: usize, next: Option<NonNull<u8>>) {
        let control_ptr = unsafe { BestFitAllocator::control_ptr(user_ptr) };
        let meta = unsafe { control_ptr.cast::<BestFitMeta>().as_ref() };

        assert!(meta.size >= size);
        assert_eq!(meta.next, next);
    }

    /// Allocates `length` bytes of 128bit aligned host memory. Returns `None`
    /// when the allocation fails or the range would reach past `isize::MAX`
    /// (which `add_range` asserts against).
    fn alloc_range(length: usize) -> Option<Range<PhysAddr>> {
        let alloc_range = std::alloc::Layout::from_size_align(length, align_of::<u128>()).unwrap();
        let ptr = unsafe { std::alloc::alloc(alloc_range) };

        if ptr.is_null() || ((ptr as usize) >= isize::MAX as usize - length) {
            None
        } else {
            Some(PhysAddr::new(ptr as usize)..PhysAddr::new(ptr as usize + length))
        }
    }

    /// Proof harness: for any non-zero size below the address limit, adding a
    /// range strictly larger than size + per-block overhead must let a single
    /// allocation succeed with a well-formed block.
    #[kani::proof]
    #[kani::unwind(2)]
    fn allocate_one() {
        let mut allocator = BestFitAllocator::new();

        let size: usize = kani::any();
        kani::assume(size < MAX_ADDR - size_of::<BestFitMeta>() - BestFitAllocator::align_up());
        kani::assume(size > 0);
        let larger_size: usize = kani::any_where(|&x| {
            x > size + size_of::<BestFitMeta>() + BestFitAllocator::align_up() && x < MAX_ADDR
        });

        if let Some(range) = alloc_range(larger_size) {
            unsafe {
                assert_eq!(allocator.add_range(&range), Ok(()));
            }

            let ptr = allocator.malloc(size, 1, None).unwrap();

            verify_block(ptr, size, None);
        }
    }
}
757// END VERIFICATION ---------------------------------------------------------------------------------------------------