kernel/mem/alloc.rs

//! This module provides a simple allocator.
//! One implementation is the BestFitAllocator, which uses the best-fit strategy.

use core::{ops::Range, ptr::NonNull};

use crate::{BUG_ON, utils};

/// Exclusive upper bound on valid addresses (and hence allocation sizes).
/// On 64-bit targets this assumes the common 48-bit virtual address space.
#[cfg(target_pointer_width = "64")]
pub const MAX_ADDR: usize = 2_usize.pow(48);

#[cfg(target_pointer_width = "32")]
pub const MAX_ADDR: usize = usize::MAX;

/// Allocator trait that provides a way to allocate and free memory.
/// Normally you don't need to use this directly; use the `boxed::Box` type instead.
///
/// # Safety
///
/// Every block returned by `malloc` must be freed by `free` exactly once.
/// A pointer allocated by one allocator must not be freed by another allocator.
/// Each range added to the allocator must be valid for the whole lifetime of the allocator and must not overlap with any other range.
/// The lifetime of any allocation is only valid as long as the allocator is valid. (A pointer must not be used after the allocator is dropped.)
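///
/// # Example
///
/// A minimal sketch (`start..end` is a hypothetical valid memory range):
///
/// ```ignore
/// let mut alloc = BestFitAllocator::new();
/// unsafe { alloc.add_range(start..end)? };
/// // Allocate space for one u64, then hand it back.
/// let ptr: NonNull<u64> = alloc.malloc(size_of::<u64>(), align_of::<u64>())?;
/// unsafe { alloc.free(ptr, size_of::<u64>()) };
/// ```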
pub trait Allocator {
    fn malloc<T>(&mut self, size: usize, align: usize) -> Result<NonNull<T>, utils::KernelError>;
    unsafe fn free<T>(&mut self, ptr: NonNull<T>, size: usize);
}

/// The metadata stored immediately before every block in the BestFitAllocator.
struct BestFitMeta {
    /// The size of the block in bytes.
    size: usize,
    /// The pointer to the next free block. This is `None` if the block is allocated.
    next: Option<NonNull<u8>>,
}

/// This is an allocator implementation that uses the best-fit strategy:
/// when we allocate a block, we look for the smallest free block that fits the requested size.
/// Blocks are stored in a singly linked list. The important part is that the linked list is stored in-line with the memory blocks:
/// every block has a header that contains the size of the block and a pointer to the next free block.
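///
/// Memory layout of a single block (a sketch, not to scale):
///
/// ```text
/// | BestFitMeta | padding | user memory (meta.size bytes) |
/// ^ control ptr           ^ user ptr (128-bit aligned)
/// ```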
pub struct BestFitAllocator {
    /// Head of the free block list.
    head: Option<NonNull<u8>>,
}

/// Implementation of the BestFitAllocator.
impl BestFitAllocator {
    /// The smallest range that may be added: the header, its padding, and at least one usable byte.
    pub const MIN_RANGE_SIZE: usize = size_of::<BestFitMeta>() + Self::align_up() + 1;

    /// Creates a new BestFitAllocator.
    ///
    /// Returns the new BestFitAllocator.
    pub const fn new() -> Self {
        Self { head: None }
    }

    /// Adds a range of memory to the allocator.
    ///
    /// `range` - The range of memory to add.
    ///
    /// Returns `Ok(())` if the range was added successfully, otherwise an error.
    ///
    /// # Safety
    ///
    /// The range must be valid, 128-bit aligned and must not overlap with any other current or future range.
    /// The range must also be at least as large as `MIN_RANGE_SIZE`.
    /// The range must stay valid for the whole lifetime of the allocator, and the lifetime of any allocation is only valid as long as the allocator is valid.
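    ///
    /// E.g. (hypothetical addresses): adding `0x1000..0x2000` writes a `BestFitMeta`
    /// header at `0x1000`; with a 16-byte header and no padding, the resulting free
    /// block offers `0x1010..0x2000` (4080 bytes) to users.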
    pub unsafe fn add_range(&mut self, range: Range<usize>) -> Result<(), utils::KernelError> {
        let ptr = range.start;

        // Check if the pointer is 128-bit aligned.
        if ptr % align_of::<u128>() != 0 {
            return Err(utils::KernelError::InvalidAlign);
        }

        if ptr == 0 {
            return Err(utils::KernelError::InvalidAddress);
        }

        debug_assert!(range.end > range.start);
        debug_assert!(range.end - range.start > size_of::<BestFitMeta>() + Self::align_up());
        debug_assert!(range.end <= isize::MAX as usize);

        // The user pointer is the pointer to the user memory, so we skip the size of the metadata plus any padding.
        let user_pointer = ptr + size_of::<BestFitMeta>() + Self::align_up();

        // Set the current head as the next block, so we can add the new block to the head.
        let meta = BestFitMeta {
            size: range.end - user_pointer,
            next: self.head,
        };

        // Write the header to the memory.
        unsafe { core::ptr::write(ptr as *mut BestFitMeta, meta) };

        // Set the head to the new block.
        self.head = Some(unsafe { NonNull::new_unchecked(ptr as *mut u8) });
        Ok(())
    }

    /// Calculates the padding required to align the block. Note: We only align to 128 bits.
    ///
    /// Returns the padding in bytes.
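    ///
    /// E.g. (hypothetical header sizes, 16-byte alignment): a 16-byte header needs
    /// `(16 - 16 % 16) % 16 = 0` bytes of padding, while a 24-byte header would need
    /// `(16 - 24 % 16) % 16 = 8`.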
    const fn align_up() -> usize {
        let meta = size_of::<BestFitMeta>();
        let align = align_of::<u128>();
        // Calculate the padding required to align the block.
        (align - (meta % align)) % align
    }

    /// Selects the best-fit block for the given size.
    ///
    /// `size` - The size of the block.
    ///
    /// Returns the control pointer to the block and the control pointer to the previous block (if any).
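    ///
    /// For example, with free blocks of 512, 160 and 256 bytes, a request for
    /// 130 bytes selects the 160-byte block: the smallest one that still fits.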
    fn select_block(
        &mut self,
        size: usize,
    ) -> Result<(NonNull<u8>, Option<NonNull<u8>>), utils::KernelError> {
        let mut best_fit = Err(utils::KernelError::OutOfMemory);
        let mut best_fit_size = usize::MAX;

        let mut current = self.head;
        let mut prev = None;

        // Iterate over all blocks and find the best fit.
        while let Some(ptr) = current {
            // Get the metadata of the block.
            let meta = unsafe { ptr.cast::<BestFitMeta>().as_ref() };

            // Check if the block is big enough and not larger than the current best fit.
            if meta.size >= size && meta.size <= best_fit_size {
                best_fit = Ok((ptr, prev));
                best_fit_size = meta.size;
            }

            // Move to the next block.
            prev = current;
            current = meta.next;
        }

        best_fit
    }

    /// Calculates the user pointer from the control pointer.
    ///
    /// `ptr` - The control pointer.
    ///
    /// Returns the user pointer.
    ///
    /// # Safety
    ///
    /// `ptr` must be a valid control pointer. Note: After the allocator which allocated the pointer is dropped, the control pointer is always considered invalid.
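    ///
    /// E.g. (hypothetical addresses): a control pointer of `0x1000` with a 16-byte
    /// header and zero padding yields the user pointer `0x1010`.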
    unsafe fn user_ptr(ptr: NonNull<u8>) -> NonNull<u8> {
        debug_assert!(
            (ptr.as_ptr() as usize)
                <= isize::MAX as usize - size_of::<BestFitMeta>() - Self::align_up()
        );
        unsafe { ptr.byte_add(size_of::<BestFitMeta>() + Self::align_up()) }
    }

    /// Calculates the control pointer from the user pointer.
    ///
    /// `ptr` - The user pointer.
    ///
    /// Returns the control pointer.
    ///
    /// # Safety
    ///
    /// `ptr` must be a valid user pointer. Note: After the allocator which allocated the pointer is dropped, the user pointer is always considered invalid.
    unsafe fn control_ptr(ptr: NonNull<u8>) -> NonNull<u8> {
        debug_assert!((ptr.as_ptr() as usize) > size_of::<BestFitMeta>() + Self::align_up());
        unsafe { ptr.byte_sub(size_of::<BestFitMeta>() + Self::align_up()) }
    }
}

/// Implementation of the Allocator trait for BestFitAllocator.
impl Allocator for BestFitAllocator {
    /// Allocates a block of memory with the given size and alignment.
    /// Note: This function always returns `InvalidAlign` for any alignment greater than 128 bits.
    ///
    /// `size` - The size of the block.
    /// `align` - The alignment of the block.
    ///
    /// Returns the user pointer to the block if successful, otherwise an error.
    fn malloc<T>(&mut self, size: usize, align: usize) -> Result<NonNull<T>, utils::KernelError> {
        // Check if the alignment is valid.
        if align > align_of::<u128>() {
            return Err(utils::KernelError::InvalidAlign);
        }

        // Check if the size is valid.
        if size == 0 {
            return Err(utils::KernelError::InvalidSize);
        }

        // For some cfgs this warning is correct, but for others it is not.
        #[allow(clippy::absurd_extreme_comparisons)]
        if size >= MAX_ADDR {
            return Err(utils::KernelError::InvalidSize);
        }

        // Align the size.
        let aligned_size = super::align_up(size);
        debug_assert!(aligned_size >= size);
        debug_assert!(aligned_size <= isize::MAX as usize);

        // Find the best-fit block.
        let (split, block, prev) = match self.select_block(aligned_size) {
            Ok((block, prev)) => {
                // Get the metadata of the block.
                let meta = unsafe { block.cast::<BestFitMeta>().as_mut() };

                // Calculate the number of bytes up to the beginning of the possible next metadata.
                let min = aligned_size.saturating_add(size_of::<BestFitMeta>() + Self::align_up());

                debug_assert!(
                    (block.as_ptr() as usize)
                        <= isize::MAX as usize
                            - meta.size
                            - size_of::<BestFitMeta>()
                            - Self::align_up()
                );

                debug_assert!(
                    meta.size < isize::MAX as usize - size_of::<BestFitMeta>() - Self::align_up()
                );

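                // A sketch of the split (not to scale):
                //   before: [meta|pad|............ meta.size ............]
                //   after:  [meta|pad| aligned_size ][meta'|pad| remainder]
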
                // If the block is big enough to split, it also needs to be big enough to store the metadata + alignment of the next block.
                if meta.size > min {
                    // Calculate the remaining size of the block and thus the next metadata.
                    let remaining_meta = BestFitMeta {
                        size: meta.size - min,
                        next: meta.next,
                    };

                    // Shrink the current block to the requested aligned_size (the alignment padding is not available to the user).
                    meta.size = aligned_size;

                    // Calculate the pointer to the next metadata.
                    let ptr = unsafe { Self::user_ptr(block).byte_add(aligned_size) };

                    unsafe {
                        // Write the new metadata to the memory.
                        ptr.cast::<BestFitMeta>().write(remaining_meta);
                    }

                    // If there is a previous block, we insert the new block after it. Otherwise we set it as the new head.
                    if let Some(prev) = prev {
                        let prev_meta = unsafe { prev.cast::<BestFitMeta>().as_mut() };
                        prev_meta.next = Some(ptr);
                    } else {
                        self.head = Some(ptr);
                    }

                    // The next block of an allocated block is always None.
                    meta.next = None;

                    (true, block, prev)
                } else {
                    (false, block, prev)
                }
            }
            Err(_) => {
                // No block fits the aligned size; fall back to the exact size.
                // Such a block still holds at least `size` bytes, just without the alignment padding.
                let (block, prev) = self.select_block(size)?;
                (false, block, prev)
            }
        };

        if !split {
            // Get the metadata of the block.
            let meta = unsafe { block.cast::<BestFitMeta>().as_mut() };

            if let Some(prev) = prev {
                let prev_meta = unsafe { prev.cast::<BestFitMeta>().as_mut() };
                // If there is a previous block, we remove the current block from the list, i.e. we set the next block of the previous block to the next block of the current block.
                prev_meta.next = meta.next;
            } else {
                // If there is no previous block, we set the next block as the new head.
                self.head = meta.next;
            }

            // The next block of an allocated block is always None.
            meta.next = None;
        }

        // Return the user pointer.
        Ok(unsafe { Self::user_ptr(block).cast() })
    }

    /// Frees a block of memory.
    ///
    /// `ptr` - The pointer to the block.
    /// `size` - The size that was requested from `malloc`. (This is used as a sanity check against the recorded block size.)
    ///
    /// Note: Freed blocks are not coalesced with their neighbours; they are simply pushed back onto the free list.
    unsafe fn free<T>(&mut self, ptr: NonNull<T>, size: usize) {
        let block = unsafe { Self::control_ptr(ptr.cast()) };
        let meta = unsafe { block.cast::<BestFitMeta>().as_mut() };

        // The next block of a free block is always the current head. We essentially insert the block at the beginning of the list.
        meta.next = self.head;

        // Check that the recorded block size can hold the claimed allocation.
        // (A block may be larger than the aligned request, e.g. when it was too small to split.)
        BUG_ON!(meta.size < size, "Invalid size in free()");

        // Keep `meta.size` unchanged: it records the real capacity of the block, which may exceed `size`.

        // Set the block as the new head.
        self.head = Some(block);
    }
}

// TESTING ------------------------------------------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use super::*;

    fn verify_block(user_ptr: NonNull<u8>, size: usize, next: Option<NonNull<u8>>) {
        let control_ptr = unsafe { BestFitAllocator::control_ptr(user_ptr) };
        let meta = unsafe { control_ptr.cast::<BestFitMeta>().as_ref() };

        assert!(meta.size >= size);
        assert_eq!(meta.next, next);
    }

    fn verify_ptrs_not_overlapping(ptrs: &[(NonNull<u8>, usize)]) {
        for (i, (ptr1, size1)) in ptrs.iter().enumerate() {
            for (j, (ptr2, size2)) in ptrs.iter().enumerate() {
                if i == j {
                    continue;
                }

                let begin1 = ptr1.as_ptr() as usize;
                let end1 = begin1 + size1;
                let begin2 = ptr2.as_ptr() as usize;
                let end2 = begin2 + size2;

                assert!(end1 <= begin2 || end2 <= begin1);
                assert!(begin1 != begin2);
                assert!(end1 != end2);
                assert!(*size1 > 0);
                assert!(*size2 > 0);
                assert!(end1 > begin1);
                assert!(end2 > begin2);
            }
        }
    }

    fn alloc_range(length: usize) -> Range<usize> {
        // The allocation is leaked on purpose: the allocator under test needs the range for its whole lifetime.
        let layout = std::alloc::Layout::from_size_align(length, align_of::<u128>()).unwrap();
        let ptr = unsafe { std::alloc::alloc(layout) };
        ptr as usize..ptr as usize + length
    }

    #[test]
    fn allocate_one() {
        let mut allocator = BestFitAllocator::new();

        let range = alloc_range(4096);
        unsafe {
            allocator.add_range(range).unwrap();
        }

        let ptr = allocator.malloc(128, 1).unwrap();

        verify_block(ptr, 128, None);
    }

    #[test]
    fn alloc_a_lot() {
        let mut allocator = BestFitAllocator::new();
        const CNT: usize = 100;
        const SIZE: usize = 128;

        let range = alloc_range(SIZE * CNT * 2);
        unsafe {
            allocator.add_range(range).unwrap();
        }

        let mut ptrs = Vec::new();
        for _ in 0..CNT {
            let ptr = allocator.malloc(SIZE, 1).unwrap();
            verify_block(ptr, SIZE, None);
            ptrs.push((ptr, SIZE));
        }

        verify_ptrs_not_overlapping(ptrs.as_slice());
    }

    #[test]
    fn alloc_exact() {
        let mut allocator = BestFitAllocator::new();
        const CNT: usize = 10;
        const SIZE: usize = 128;

        let range =
            alloc_range((SIZE + size_of::<BestFitMeta>() + BestFitAllocator::align_up()) * CNT);
        unsafe {
            allocator.add_range(range).unwrap();
        }

        let mut ptrs = Vec::new();
        for _ in 0..CNT {
            let ptr = allocator.malloc(SIZE, 1).unwrap();
            verify_block(ptr, SIZE, None);
            ptrs.push((ptr, SIZE));
        }

        verify_ptrs_not_overlapping(ptrs.as_slice());
    }

    #[test]
    fn alloc_oom() {
        let mut allocator = BestFitAllocator::new();
        const CNT: usize = 10;
        const SIZE: usize = 128;

        let range =
            alloc_range((SIZE + size_of::<BestFitMeta>() + BestFitAllocator::align_up()) * CNT - 1);
        unsafe {
            allocator.add_range(range).unwrap();
        }

        let mut ptrs = Vec::new();
        for _ in 0..CNT - 1 {
            let ptr = allocator.malloc(SIZE, 1).unwrap();
            verify_block(ptr, SIZE, None);
            ptrs.push(ptr);
        }

        let ptr = allocator.malloc::<u8>(SIZE, 1);
        assert!(ptr.is_err_and(|e| e == utils::KernelError::OutOfMemory));
    }

    #[test]
    fn alloc_no_oom_through_free() {
        let mut allocator = BestFitAllocator::new();
        const SIZE: usize = 128;

        let range = alloc_range(SIZE + size_of::<BestFitMeta>() + BestFitAllocator::align_up());
        unsafe {
            allocator.add_range(range).unwrap();
        }

        let ptr = allocator.malloc(SIZE, 1).unwrap();
        verify_block(ptr, SIZE, None);

        unsafe {
            allocator.free(ptr, SIZE);
        }

        let ptr = allocator.malloc(SIZE, 1).unwrap();
        verify_block(ptr, SIZE, None);
    }

    #[test]
    fn multi_range_alloc() {
        let mut allocator = BestFitAllocator::new();
        const CNT: usize = 10;
        const SIZE: usize = 128;

        let mut ranges = Vec::new();
        for _ in 0..CNT {
            let range = alloc_range(SIZE + size_of::<BestFitMeta>() + BestFitAllocator::align_up());
            unsafe {
                allocator.add_range(range.clone()).unwrap();
            }
            ranges.push(range);
        }

        let mut ptrs = Vec::new();
        for _ in 0..CNT {
            let ptr = allocator.malloc(SIZE, 1).unwrap();
            verify_block(ptr, SIZE, None);
            ptrs.push((ptr, SIZE));
        }

        verify_ptrs_not_overlapping(ptrs.as_slice());
    }

    #[test]
    fn multi_range_no_oom_through_free() {
        // Allocate one block from each range, free one of them, and verify that only then another allocation succeeds.
        let mut allocator = BestFitAllocator::new();

        const CNT: usize = 10;
        const SIZE: usize = 128;

        let mut ranges = Vec::new();
        for _ in 0..CNT {
            let range = alloc_range(SIZE + size_of::<BestFitMeta>() + BestFitAllocator::align_up());
            unsafe {
                allocator.add_range(range.clone()).unwrap();
            }
            ranges.push(range);
        }

        let mut ptrs = Vec::new();

        let ptr = allocator.malloc::<u8>(SIZE, 1).unwrap();

        for _ in 0..CNT - 1 {
            let ptr = allocator.malloc(SIZE, 1).unwrap();
            verify_block(ptr, SIZE, None);
            ptrs.push((ptr, SIZE));
        }

        unsafe {
            allocator.free(ptr, SIZE);
        }

        let ptr = allocator.malloc(SIZE, 1).unwrap();
        ptrs.push((ptr, SIZE));

        verify_ptrs_not_overlapping(ptrs.as_slice());
    }

    #[test]
    fn multi_range_oom() {
        // Allocate one block from each range until everything is exhausted, then expect an out-of-memory error.
        let mut allocator = BestFitAllocator::new();

        const CNT: usize = 10;
        const SIZE: usize = 128;

        let mut ranges = Vec::new();
        for _ in 0..CNT {
            let range = alloc_range(SIZE + size_of::<BestFitMeta>() + BestFitAllocator::align_up());
            unsafe {
                allocator.add_range(range.clone()).unwrap();
            }
            ranges.push(range);
        }

        let mut ptrs = Vec::new();

        for _ in 0..CNT {
            let ptr = allocator.malloc(SIZE, 1).unwrap();
            verify_block(ptr, SIZE, None);
            ptrs.push((ptr, SIZE));
        }

        let ptr = allocator.malloc::<u8>(SIZE, 1);
        assert!(ptr.is_err_and(|e| e == utils::KernelError::OutOfMemory));

        verify_ptrs_not_overlapping(ptrs.as_slice());
    }
}

// END TESTING --------------------------------------------------------------------------------------------------------

// VERIFICATION -------------------------------------------------------------------------------------------------------
#[cfg(kani)]
mod verification {
    use super::*;

    fn verify_block(user_ptr: NonNull<u8>, size: usize, next: Option<NonNull<u8>>) {
        let control_ptr = unsafe { BestFitAllocator::control_ptr(user_ptr) };
        let meta = unsafe { control_ptr.cast::<BestFitMeta>().as_ref() };

        assert!(meta.size >= size);
        assert_eq!(meta.next, next);
    }

    fn alloc_range(length: usize) -> Option<Range<usize>> {
        let layout = std::alloc::Layout::from_size_align(length, align_of::<u128>()).unwrap();
        let ptr = unsafe { std::alloc::alloc(layout) };

        if ptr.is_null() || ((ptr as usize) >= isize::MAX as usize - length) {
            None
        } else {
            Some(ptr as usize..ptr as usize + length)
        }
    }

    #[kani::proof]
    #[kani::unwind(2)]
    fn allocate_one() {
        let mut allocator = BestFitAllocator::new();

        let size: usize = kani::any();
        kani::assume(size < MAX_ADDR - size_of::<BestFitMeta>() - BestFitAllocator::align_up());
        kani::assume(size > 0);
        let larger_size: usize = kani::any_where(|&x| {
            x > size + size_of::<BestFitMeta>() + BestFitAllocator::align_up() && x < MAX_ADDR
        });

        if let Some(range) = alloc_range(larger_size) {
            unsafe {
                assert_eq!(allocator.add_range(range), Ok(()));
            }

            let ptr = allocator.malloc(size, 1).unwrap();

            verify_block(ptr, size, None);
        }
    }
}
// END VERIFICATION ---------------------------------------------------------------------------------------------------