1use crate::mem::pfa::PAGE_SIZE;
4use crate::mem::vmm::{AddressSpacelike, Backing, Perms, Region};
5use crate::sync::spinlock::SpinLocked;
6use alloc::Allocator;
7use core::ptr::NonNull;
8use hal::mem::PhysAddr;
9
10pub mod alloc;
11pub mod pfa;
12pub mod vmm;
13
/// Number of bits in a pointer-sized word on the target.
#[allow(dead_code)] // kept as a utility constant; not referenced in this module yet
pub const BITS_PER_PTR: usize = usize::BITS as usize;
16
// Symbol provided by the linker script. Only its *address* is meaningful
// (taken via `&raw const` in `init_memory` to locate the top of the kernel
// stack); the `u8` value behind it is never read.
unsafe extern "C" {
    unsafe static __stack_top: u8;
}
20
/// Global kernel heap allocator, guarded by a spinlock.
///
/// Starts empty; `init_memory` maps a zeroed heap region and seeds it via
/// `add_range`. The `malloc`/`free` functions below are thin locking
/// wrappers around it.
static GLOBAL_ALLOCATOR: SpinLocked<alloc::bestfit::BestFitAllocator> =
    SpinLocked::new(alloc::bestfit::BestFitAllocator::new());
24
25pub fn init_memory() -> vmm::AddressSpace {
31 let stack_top = &raw const __stack_top as usize;
32 if let Err(e) = pfa::init_pfa(PhysAddr::new(stack_top)) {
33 panic!("failed to initialize PFA. Error: {e}");
35 }
36
37 let pgs = 10;
39
40 let mut kaddr_space = vmm::AddressSpace::new(pgs).unwrap_or_else(|e| {
41 panic!("failed to create kernel address space. Error: {e}");
42 });
43
44 let begin = kaddr_space
45 .map(Region::new(
46 None,
47 2 * PAGE_SIZE,
48 Backing::Zeroed,
49 Perms::all(),
50 ))
51 .unwrap_or_else(|e| {
52 panic!("failed to map kernel address space. Error: {e}");
53 });
54
55 {
56 let mut allocator = GLOBAL_ALLOCATOR.lock();
57
58 let range = begin..(begin + pgs * PAGE_SIZE);
59 if let Err(e) = unsafe { allocator.add_range(&range) } {
60 panic!("failed to add range to allocator. Error: {e}");
61 }
62 }
63
64 kaddr_space
65}
66
67pub fn malloc(size: usize, align: usize) -> Option<NonNull<u8>> {
74 let mut allocator = GLOBAL_ALLOCATOR.lock();
75 allocator.malloc(size, align, None).ok()
76}
77
78pub unsafe fn free(ptr: NonNull<u8>, size: usize) {
87 let mut allocator = GLOBAL_ALLOCATOR.lock();
88 unsafe { allocator.free(ptr, size) };
89}
90
/// Rounds `size` up to the next multiple of `align_of::<u128>()`.
///
/// Saturates to `usize::MAX` when the rounded value would overflow. Note
/// that the saturated return value is *not* itself aligned — callers
/// should treat `usize::MAX` as an overflow sentinel.
pub fn align_up(size: usize) -> usize {
    let align = align_of::<u128>();

    // Guard against wrap-around in `size + align - 1` below.
    if size >= usize::MAX - align {
        return usize::MAX;
    }

    (size + align - 1) & !(align - 1)
}