// kernel/mem.rs

1//! This module provides access to the global memory allocator.
2
3use crate::sync::spinlock::SpinLocked;
4use crate::{BootInfo, utils};
5use alloc::Allocator;
6use core::ptr::NonNull;
7
8pub mod alloc;
9pub mod array;
10pub mod boxed;
11pub mod heap;
12pub mod pool;
13pub mod queue;
14
/// The possible types of memory, compatible with the multiboot2 memory map.
/// Link: https://www.gnu.org/software/grub/manual/multiboot/multiboot.html
///
/// Discriminants mirror the `type` field of a multiboot2 memory-map entry,
/// which is why they are compared against `entry.ty as u32` in `init_memory`.
#[repr(C)]
#[allow(unused)]
enum MemoryTypes {
    /// Memory that is available for use.
    Available = 1,
    /// Memory that is reserved for the system.
    Reserved = 2,
    /// Memory that is reclaimable after ACPI tables are read.
    ACPIReclaimable = 3,
    /// ACPI Non-volatile-sleeping memory.
    Nvs = 4,
    /// Memory that is bad.
    BadMemory = 5,
}
31
/// The global memory allocator.
///
/// Guarded by a spinlock; every allocation entry point in this module
/// (`init_memory`, `malloc`, `free`) takes the lock before touching it.
static GLOBAL_ALLOCATOR: SpinLocked<alloc::BestFitAllocator> =
    SpinLocked::new(alloc::BestFitAllocator::new());
35
36/// Initialize the memory allocator.
37///
38/// `boot_info` - The boot information. This contains the memory map.
39///
40/// Returns an error if the memory allocator could not be initialized.
41pub fn init_memory(boot_info: &BootInfo) -> Result<(), utils::KernelError> {
42    let mut allocator = GLOBAL_ALLOCATOR.lock();
43
44    for entry in boot_info.mmap.iter().take(boot_info.mmap_len) {
45        // We only add available memory to the allocator.
46        if entry.ty == MemoryTypes::Available as u32 {
47            let range = entry.addr as usize..(entry.addr + entry.length) as usize;
48            unsafe {
49                allocator.add_range(range)?;
50            }
51        }
52    }
53
54    Ok(())
55}
56
57/// Allocate a memory block. Normally Box<T> or SizedPool<T> should be used instead of this function.
58///
59/// `size` - The size of the memory block to allocate.
60/// `align` - The alignment of the memory block.
61///
62/// Returns a pointer to the allocated memory block if the allocation was successful, or `None` if the allocation failed.
63pub fn malloc(size: usize, align: usize) -> Option<NonNull<u8>> {
64    let mut allocator = GLOBAL_ALLOCATOR.lock();
65    allocator.malloc(size, align).ok()
66}
67
68/// Free a memory block.
69///
70/// `ptr` - The pointer to the memory block.
71/// `size` - The size of the memory block.
72///
73/// # Safety
74///
75/// The caller must ensure that the pointer is from a previous call to `malloc` and that the size is still the same.
76pub unsafe fn free(ptr: NonNull<u8>, size: usize) {
77    let mut allocator = GLOBAL_ALLOCATOR.lock();
78    unsafe { allocator.free(ptr, size) };
79}
80
/// Rounds `size` up to the next multiple of the `u128` alignment.
///
/// `size` - The size to align.
///
/// Returns the aligned size, or `usize::MAX` when rounding up would
/// overflow `usize`.
pub fn align_up(size: usize) -> usize {
    let align = core::mem::align_of::<u128>();

    // Guard: `size + align - 1` below must not overflow.
    if size >= usize::MAX - align {
        return usize::MAX;
    }

    (size + align - 1) & !(align - 1)
}
94
95// VERIFICATION -------------------------------------------------------------------------------------------------------
#[cfg(kani)]
mod verification {
    //! Kani proof harnesses for allocator initialization and `align_up`.
    //! This module only compiles under the Kani verifier (`cfg(kani)`).

    use crate::MemMapEntry;

    use super::*;
    use kani::Arbitrary;

    /// Produces symbolic memory-map entries constrained to valid,
    /// non-degenerate ranges inside the allocator's address space.
    impl Arbitrary for MemMapEntry {
        fn any() -> Self {
            let size = size_of::<MemMapEntry>() as u32;
            let length = kani::any();
            let addr = kani::any();

            // Keep the length within the allocator's address space and
            // above its minimum manageable range size.
            kani::assume(
                length < alloc::MAX_ADDR as u64
                    && length > alloc::BestFitAllocator::MIN_RANGE_SIZE as u64,
            );
            // Ensure addr + length cannot exceed MAX_ADDR and addr is nonzero.
            kani::assume(addr < alloc::MAX_ADDR as u64 - length && addr > 0);

            MemMapEntry {
                size,
                addr,
                length,
                ty: kani::any(),
            }
        }

        fn any_array<const MAX_ARRAY_LENGTH: usize>() -> [Self; MAX_ARRAY_LENGTH] {
            [(); MAX_ARRAY_LENGTH].map(|_| Self::any())
        }
    }

    /// Stub for `core::ptr::write` so the allocator's internal writes to
    /// symbolic addresses do not fault during verification.
    // NOTE(review): both parameters are intentionally unused; `src` is
    // dropped here rather than written — confirm that is acceptable for
    // the types the allocator writes.
    fn mock_ptr_write<T>(dst: *mut T, src: T) {
        // Just a noop
    }

    #[kani::proof]
    #[kani::stub(core::ptr::write, mock_ptr_write)]
    fn proof_init_allocator_good() {
        // NOTE(review): `[MemMapEntry; _]` requires the unstable
        // `generic_arg_infer` feature, and the array length must already be
        // fixed at compile time here — verify this harness actually builds
        // under the project's Kani toolchain.
        let mmap: [MemMapEntry; _] = kani::any();

        // NOTE(review): `mmap.len()` is a compile-time constant for an
        // array, so this assume either always holds or always fails rather
        // than constraining a symbolic length.
        kani::assume(mmap.len() > 0 && mmap.len() <= 8);
        for entry in mmap.iter() {
            // Ensure aligned.
            kani::assume(entry.addr % align_of::<u128>() as u64 == 0);
            // Ensure valid range.
            kani::assume(entry.addr > 0);
            kani::assume(entry.length > 0);

            // Ensure non overlapping entries
            // (entries sharing an addr are treated as the same entry).
            for other in mmap.iter() {
                if entry.addr != other.addr {
                    kani::assume(
                        entry.addr + entry.length <= other.addr
                            || other.addr + other.length <= entry.addr,
                    );
                }
            }
        }

        let mmap_len = mmap.len();

        let boot_info = BootInfo {
            implementer: core::ptr::null(),
            variant: core::ptr::null(),
            mmap,
            mmap_len,
        };

        // The constrained map must always initialize successfully.
        assert!(init_memory(&boot_info).is_ok());
    }

    /// Proves `align_up` never returns 0 for nonzero input and, when it
    /// does not saturate, returns an aligned value no smaller than `size`.
    #[kani::proof]
    fn check_align_up() {
        let size = kani::any();
        kani::assume(size > 0);
        let align = align_up(size);
        assert_ne!(align, 0);

        if align != usize::MAX {
            assert_eq!(align % align_of::<u128>(), 0);
            assert!(align >= size);
        }
    }
}
181// END VERIFICATION