kernel/mem/pool.rs

//! A pool allocator that hands out fixed-size blocks from an intrusive free list.

use core::{
    marker::PhantomData,
    mem::{align_of, size_of},
    num::NonZeroUsize,
    ops::{Deref, DerefMut, Range},
    ptr::{drop_in_place, write},
};

/// Per-block metadata stored at the start of every block.
struct SizedPoolMeta {
    _size: usize,
    next: Option<NonZeroUsize>,
}

/// A pool allocator that allocates fixed-size blocks.
pub struct SizedPool<T: Default> {
    head: Option<NonZeroUsize>,
    _marker: PhantomData<T>,
}
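
// Each block in the pool is laid out as:
//
//     [ SizedPoolMeta | padding | T ]
//       ^ block start             ^ aligned to `align_of::<T>()`, provided
//                                   the block itself starts suitably aligned
//
// Free blocks are chained through `SizedPoolMeta::next`, forming an
// intrusive singly linked free list headed by `SizedPool::head`.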

impl<T: Default> Default for SizedPool<T> {
    fn default() -> Self {
        Self::new()
    }
}

impl<T: Default> SizedPool<T> {
    /// Create a new empty pool.
    pub const fn new() -> Self {
        Self {
            head: None,
            _marker: PhantomData,
        }
    }

    /// Calculate the padding required after `SizedPoolMeta` so that the `T`
    /// that follows is aligned to `align_of::<T>()`, assuming the block
    /// itself starts at an `align_of::<T>()`-aligned address.
    const fn align_up() -> usize {
        let meta = size_of::<SizedPoolMeta>();
        let align = align_of::<T>();
        // Round the metadata size up to the next multiple of `align`; the
        // difference is the padding.
        (align - (meta % align)) % align
    }
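
    // Worked example: with a 24-byte `SizedPoolMeta` and `align_of::<T>()`
    // of 16, the padding is (16 - (24 % 16)) % 16 = 8, so `T` starts 32
    // bytes into the block. When the metadata size is already a multiple of
    // the alignment, say 16 and 8, the padding is (8 - (16 % 8)) % 8 = 0.
    // (The sizes here are illustrative; the real values are target-dependent.)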

    /// Add a range of memory to the pool, carving it into fixed-size blocks.
    ///
    /// `range` - The range of addresses to add.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the range starts at a non-zero address
    /// aligned for both `SizedPoolMeta` and `T`, and that it refers to
    /// memory that is valid for reads and writes, unused by anything else
    /// for the lifetime of the pool, and at least
    /// `size_of::<SizedPoolMeta>()` + padding + `size_of::<T>()` bytes long.
    pub unsafe fn add_range(&mut self, range: Range<usize>) {
        let stride = size_of::<SizedPoolMeta>() + Self::align_up() + size_of::<T>();
        let mut ptr = range.start;

        // Only carve out a block when the whole block fits inside the range;
        // checking `ptr < range.end` alone would let the last block spill
        // past the end of the range.
        while ptr + stride <= range.end {
            unsafe {
                self.add_block(ptr);
            }

            ptr += stride;
        }
    }
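
    // A minimal sketch of seeding a pool from a static buffer. `BACKING`
    // and its size are illustrative, not part of this module, and a real
    // caller must guarantee the alignment described in the safety contract
    // (a plain `[u8; N]` only guarantees byte alignment):
    //
    //     static mut BACKING: [u8; 4096] = [0; 4096];
    //
    //     let mut pool: SizedPool<u64> = SizedPool::new();
    //     let start = core::ptr::addr_of_mut!(BACKING) as usize;
    //     unsafe { pool.add_range(start..start + 4096) };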

    /// Add a single block to the front of the free list.
    ///
    /// `ptr` - The address of the block to add.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `ptr` is non-zero, aligned for both
    /// `SizedPoolMeta` and `T`, and points to memory that is valid for
    /// reads and writes and at least
    /// `size_of::<SizedPoolMeta>()` + padding + `size_of::<T>()` bytes long.
    unsafe fn add_block(&mut self, ptr: usize) {
        let meta = SizedPoolMeta {
            _size: size_of::<T>(),
            next: self.head,
        };

        unsafe {
            write(ptr as *mut SizedPoolMeta, meta);
        }

        // SAFETY: `ptr` is non-zero per this function's safety contract.
        self.head = Some(unsafe { NonZeroUsize::new_unchecked(ptr) });
    }

    /// Allocate a block from the pool.
    ///
    /// Returns `Some(Owned<T>)` if a block was successfully allocated,
    /// otherwise `None`.
    pub fn alloc(&mut self) -> Option<Owned<T>> {
        let head = self.head.take();

        head.map(|head| {
            // Pop the front block off the free list.
            let meta = unsafe { &*(head.get() as *const SizedPoolMeta) };
            self.head = meta.next;

            // The payload lives after the metadata and its alignment
            // padding; initialize it to `T::default()`.
            let ptr = head.get() + size_of::<SizedPoolMeta>() + Self::align_up();
            unsafe { write(ptr as *mut T, T::default()) };

            Owned { ptr: ptr as *mut T }
        })
    }
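
    // For example (a sketch; assumes the pool was already seeded via
    // `add_range`):
    //
    //     if let Some(mut value) = pool.alloc() {
    //         *value = 7;
    //         pool.dealloc(value);
    //     }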

    /// Deallocate a block back to the pool.
    ///
    /// `block` - The block to deallocate.
    pub fn dealloc(&mut self, block: Owned<T>) {
        // Drop the payload in place so that types with a `Drop` impl are
        // not leaked when the block is recycled.
        unsafe { drop_in_place(block.ptr) };

        // Recover the block's start address from the payload pointer.
        let ptr = block.ptr as usize - size_of::<SizedPoolMeta>() - Self::align_up();

        // Push the block onto the front of the free list.
        // SAFETY: `ptr` was non-zero when the block was first added.
        let head = self
            .head
            .replace(unsafe { NonZeroUsize::new_unchecked(ptr) });

        // Point the recycled block at the previous head.
        let meta = unsafe { &mut *(ptr as *mut SizedPoolMeta) };
        meta.next = head;
    }
}

/// An owned block handed out by a pool.
///
/// The value borrows the pool's memory: it must be returned with
/// [`SizedPool::dealloc`] and must not outlive the memory added to the pool.
pub struct Owned<T> {
    ptr: *mut T,
}

impl<T> Deref for Owned<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        unsafe { &*self.ptr }
    }
}

impl<T> DerefMut for Owned<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *self.ptr }
    }
}
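
// A minimal sketch of the full allocate/deallocate round trip, written as a
// unit test. The backing buffer, its size, and the `#[repr(align(8))]`
// wrapper are illustrative assumptions, and running the test assumes the
// crate's tests can be built against a hosted target.
#[cfg(test)]
mod tests {
    use super::*;

    // Align the buffer for both `SizedPoolMeta` and the `u64` payload.
    #[repr(align(8))]
    struct Backing([u8; 256]);

    #[test]
    fn alloc_and_dealloc_round_trip() {
        let mut backing = Backing([0; 256]);
        let start = backing.0.as_mut_ptr() as usize;

        let mut pool: SizedPool<u64> = SizedPool::new();
        // SAFETY: the buffer is aligned, writable, unused elsewhere, and
        // large enough for at least one block.
        unsafe { pool.add_range(start..start + 256) };

        let mut a = pool.alloc().expect("pool should have at least one block");
        assert_eq!(*a, 0); // freshly initialized to `u64::default()`
        *a = 42;
        assert_eq!(*a, 42);

        // Returning the block makes it available for reuse.
        pool.dealloc(a);
        let b = pool.alloc().expect("recycled block should be available");
        assert_eq!(*b, 0); // re-initialized to the default on alloc
        pool.dealloc(b);
    }
}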