// osiris/types/pool.rs

1//! This module provides a pool allocator implementation.
2
3#![allow(dead_code)]
4
5use core::{
6    marker::PhantomData,
7    num::NonZeroUsize,
8    ops::{Deref, DerefMut, Range},
9    ptr::write,
10};
11
/// Meta information for a block in the pool.
///
/// A `SizedPoolMeta` header is written at the base address of every block
/// (see `add_block`); the `T` payload follows it after alignment padding.
struct SizedPoolMeta {
    // Size in bytes of the payload type, recorded by `add_block`; never read
    // (hence the leading underscore) — presumably kept for debugging.
    _size: usize,
    // Base address of the next free block, threading an intrusive singly
    // linked free list through the blocks themselves. `None` ends the list.
    next: Option<NonZeroUsize>,
}
17
/// A pool allocator that allocates fixed-size blocks.
///
/// Free blocks are kept on an intrusive singly linked list threaded through
/// each block's `SizedPoolMeta` header; `head` points at the first free block.
///
/// The `T: Default` bound previously placed on the struct definition is only
/// required by `alloc`, so it belongs on the impl blocks instead; removing it
/// here is backward compatible and lets `SizedPool<T>` be named for any `T`.
pub struct SizedPool<T> {
    // Base address of the first free block, or `None` when the pool is empty.
    head: Option<NonZeroUsize>,
    // The pool logically manages values of `T` without storing one inline.
    _marker: PhantomData<T>,
}
23
24impl<T: Default> Default for SizedPool<T> {
25    fn default() -> Self {
26        Self::new()
27    }
28}
29
30impl<T: Default> SizedPool<T> {
31    /// Create a new empty pool.
32    pub const fn new() -> Self {
33        Self {
34            head: None,
35            _marker: PhantomData,
36        }
37    }
38
39    /// Calculate the padding required to align the block to `align_of::<T>()`.
40    const fn align_up() -> usize {
41        let meta = size_of::<SizedPoolMeta>();
42        let align = align_of::<T>();
43        // Calculate the padding required to align the block.
44        (align - (meta % align)) % align
45    }
46
47    /// Add a range of blocks to the pool.
48    ///
49    /// `range` - The range of blocks to add.
50    ///
51    /// # Safety
52    ///
53    /// The caller must ensure that the range is valid and that the blocks are at least the size of `T` + `SizedPoolMeta` + Padding for `T`.
54    pub unsafe fn add_range(&mut self, range: Range<usize>) {
55        let mut ptr = range.start;
56
57        while ptr < range.end {
58            unsafe {
59                self.add_block(ptr);
60            }
61
62            ptr += Self::align_up() + size_of::<SizedPoolMeta>() + size_of::<T>();
63        }
64    }
65
66    /// Add a block to the pool.
67    ///
68    /// `ptr` - The pointer to the block to add.
69    ///
70    /// # Safety
71    ///
72    /// The caller must ensure that the pointer is valid and that the block is at least the size of `T` + `SizedPoolMeta` + Padding for `T`.
73    unsafe fn add_block(&mut self, ptr: usize) {
74        let meta = SizedPoolMeta {
75            _size: size_of::<T>(),
76            next: self.head,
77        };
78
79        unsafe {
80            write(ptr as *mut SizedPoolMeta, meta);
81        }
82
83        self.head = Some(unsafe { NonZeroUsize::new_unchecked(ptr) });
84    }
85
86    /// Allocate a block from the pool.
87    ///
88    /// Returns `Some(Owned<T>)` if a block was successfully allocated, otherwise `None`.
89    pub fn alloc(&mut self) -> Option<Owned<T>> {
90        let head = self.head.take();
91
92        head.map(|head| {
93            let meta = unsafe { &*(head.get() as *const SizedPoolMeta) };
94            self.head = meta.next;
95
96            let ptr = head.get() + size_of::<SizedPoolMeta>() + Self::align_up();
97            unsafe { write(ptr as *mut T, T::default()) };
98
99            Owned { ptr: ptr as *mut T }
100        })
101    }
102
103    /// Deallocate a block back to the pool.
104    ///
105    /// `block` - The block to deallocate.
106    pub fn dealloc(&mut self, block: Owned<T>) {
107        let ptr = block.ptr as usize - size_of::<SizedPoolMeta>() - Self::align_up();
108
109        // Append ptr to the front of the list.
110        let head = self
111            .head
112            .replace(unsafe { NonZeroUsize::new_unchecked(ptr) });
113
114        // Update the next pointer to the previous head.
115        let meta = unsafe { &mut *(ptr as *mut SizedPoolMeta) };
116        meta.next = head;
117    }
118}
119
/// An owned block from a pool.
///
/// Holds a raw pointer to a `T` that was initialized by `SizedPool::alloc`.
/// Dropping an `Owned` without returning it via `SizedPool::dealloc` leaks
/// the block (it never rejoins the free list), hence `#[must_use]`.
#[must_use = "dropping an Owned leaks its pool block; return it with SizedPool::dealloc"]
pub struct Owned<T> {
    // Points at the `T` payload, just past the block's metadata header.
    ptr: *mut T,
}
124
125impl<T: Default> Deref for Owned<T> {
126    type Target = T;
127
128    fn deref(&self) -> &Self::Target {
129        unsafe { &*self.ptr }
130    }
131}
132
133impl<T: Default> DerefMut for Owned<T> {
134    fn deref_mut(&mut self) -> &mut Self::Target {
135        unsafe { &mut *self.ptr }
136    }
137}