1use core::pin::Pin;
2use core::ptr::NonNull;
3
4use hal::mem::PhysAddr;
5
6use crate::{
7 error::Result,
8 types::{
9 bitset::BitAlloc,
10 boxed::{self, Box},
11 },
12};
13
/// Fixed-capacity physical page-frame allocator backed by a bitmap.
///
/// `N` sizes the underlying [`BitAlloc`]; the total page capacity is
/// `N * BitAlloc::<N>::BITS_PER_WORD` (see `Allocator::new`), one bit per
/// page, with pages laid out contiguously from `begin`.
pub struct Allocator<const N: usize> {
    // Physical address of the first managed page; `new` guarantees this is
    // page aligned.
    begin: PhysAddr,
    // Free/used bitmap: bit `i` corresponds to the page at
    // `begin + i * PAGE_SIZE`.
    bitalloc: BitAlloc<N>,
}
18
19impl<const N: usize> Allocator<N> {
20 pub fn new(begin: PhysAddr) -> Option<Self> {
21 if !begin.is_multiple_of(super::PAGE_SIZE) {
22 return None;
23 }
24
25 if begin > PhysAddr::MAX - (N * super::PAGE_SIZE * usize::BITS as usize) {
26 return None;
27 }
28
29 Some(Self {
30 begin,
31 bitalloc: BitAlloc::new(N * BitAlloc::<N>::BITS_PER_WORD)?,
32 })
33 }
34}
35
impl<const N: usize> super::Allocator<N> for Allocator<N> {
    /// Returns a routine that constructs this allocator in place at a raw
    /// physical address.
    ///
    /// The returned fn writes `Self` directly at `addr`, then hands the
    /// page-aligned space *after* the struct to the bitmap as the pool base.
    ///
    /// # Safety (of the returned fn)
    /// The caller must guarantee `addr` points to writable memory that is
    /// large enough for `Self` plus the managed pages and is not owned or
    /// aliased by anything else.
    fn initializer() -> unsafe fn(PhysAddr, usize) -> Result<Pin<Box<Self>>> {
        |addr: PhysAddr, pcnt: usize| -> Result<Pin<Box<Self>>> {
            // Capacity is fixed at compile time by `N`; larger pools would
            // need a runtime-sized bitmap, which is not implemented yet.
            // NOTE(review): the bitmap actually holds
            // N * BITS_PER_WORD bits (see `new`), so comparing against bare
            // `N` looks stricter than the real capacity — confirm intended.
            if pcnt > N {
                todo!("Runtime page frame allocator for more than {} pages", N)
            }

            // `Self` is placed at `addr`, so `addr` must satisfy its
            // alignment or the `ptr::write` below would be UB.
            if !addr.is_multiple_of(core::mem::align_of::<Self>()) {
                return Err(kerr!(InvalidArgument));
            }

            // Reject a null address; all other validity is the caller's
            // promise (see Safety above).
            let ptr = NonNull::new(addr.as_mut_ptr::<Self>()).ok_or(kerr!(InvalidArgument))?;
            // First byte past the in-place struct...
            let begin = addr + size_of::<Self>();
            // ...rounded up to the next page boundary. The mask trick assumes
            // PAGE_SIZE is a power of two.
            let begin = if begin.is_multiple_of(super::PAGE_SIZE) {
                begin
            } else {
                PhysAddr::new((begin.as_usize() + super::PAGE_SIZE - 1) & !(super::PAGE_SIZE - 1))
            };
            // SAFETY: alignment was checked above and the caller guarantees
            // `addr` is valid for writes of `Self`; `ptr::write` does not
            // drop the (uninitialized) destination. Note `Self::new` failing
            // returns an error *before* the write happens.
            unsafe {
                core::ptr::write(
                    ptr.as_ptr(),
                    Self::new(begin).ok_or(kerr!(InvalidArgument))?,
                )
            };

            // SAFETY: `ptr` was just initialized. NOTE(review): this is the
            // crate-local `boxed::Box`, which presumably supports adopting
            // placement-constructed (non-heap) memory — the std equivalent
            // would require the pointer to come from the global allocator;
            // confirm against `types::boxed`.
            Ok(Pin::new(unsafe { boxed::Box::from_raw(ptr) }))
        }
    }

    /// Allocates `page_count` contiguous pages and returns the physical
    /// address of the first one, or `None` if the bitmap has no suitable
    /// free run.
    fn alloc(&mut self, page_count: usize) -> Option<PhysAddr> {
        let idx = self.bitalloc.alloc(page_count)?;
        // Translate the bit index back into a physical address relative to
        // the pool base.
        Some(self.begin + (idx * super::PAGE_SIZE))
    }

    /// Releases `page_count` pages starting at `addr`.
    ///
    /// # Panics
    /// Panics if `addr` is not page aligned.
    ///
    /// NOTE(review): an `addr` below `self.begin` or past the pool end is not
    /// rejected here — presumably `PhysAddr::diff` and/or `BitAlloc::free`
    /// catch that; confirm.
    fn free(&mut self, addr: PhysAddr, page_count: usize) {
        if !addr.is_multiple_of(super::PAGE_SIZE) {
            panic!("Address must be page aligned");
        }
        // Byte offset from the pool base, converted to a page/bit index.
        let idx = addr.diff(self.begin) / super::PAGE_SIZE;
        self.bitalloc.free(idx, page_count);
    }
}