use core::ptr::copy_nonoverlapping;

use hal::mem::{PhysAddr, VirtAddr};

use crate::{
    error::Result,
    mem::{
        alloc::{Allocator, bestfit},
        pfa, vmm,
    },
};

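/// An identity-style address space backed by one contiguous block of
/// physical pages, with a best-fit allocator carving regions out of it.
/// Virtual addresses are plain offsets from `begin`.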
pub struct AddressSpace {
    begin: PhysAddr,
    end: PhysAddr,
    allocator: bestfit::BestFitAllocator,
}

impl vmm::AddressSpacelike for AddressSpace {
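    /// Allocates `pgs` contiguous pages from the page-frame allocator and
    /// seeds a best-fit allocator with the resulting `begin..end` range.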
    fn new(pgs: usize) -> Result<Self> {
        let begin = pfa::alloc_page(pgs).ok_or(kerr!(OutOfMemory))?;
        let end = begin
            .checked_add(pgs * pfa::PAGE_SIZE)
            .ok_or(kerr!(OutOfMemory))?;

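        // SAFETY: `begin..end` was just handed out by the PFA, so the range
        // is owned by this address space and safe to register.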
        let mut allocator = bestfit::BestFitAllocator::new();
        unsafe { allocator.add_range(&(begin..end))? };

        Ok(Self {
            begin,
            end,
            allocator,
        })
    }

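    /// Carves `region.len()` bytes out of the backing range (honoring a
    /// requested start address when one is given) and initializes them
    /// according to the region's backing.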
    fn map(&mut self, region: vmm::Region) -> Result<PhysAddr> {
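        // Translate a requested virtual start into a physical hint for the
        // allocator; everything is handed out with `u128` alignment.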
        let req = region.start.and_then(|virt| self.virt_to_phys(virt));
        let align = core::mem::align_of::<u128>();
        let start = self.allocator.malloc::<u8>(region.len(), align, req)?;

        match region.backing {
            vmm::Backing::Anon(phys) => {
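                // Copy the supplied backing pages into the new allocation.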
                unsafe {
                    copy_nonoverlapping(phys.as_mut_ptr::<u8>(), start.as_ptr(), region.len())
                };
            }
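            // Zeroed regions are cleared before being handed out.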
            vmm::Backing::Zeroed => {
                unsafe { core::ptr::write_bytes(start.as_ptr(), 0, region.len()) };
            }
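            // Uninit regions are returned as-is, without initialization.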
            vmm::Backing::Uninit => {}
        }

        Ok(start.into())
    }

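    /// Currently a no-op: freed regions are not returned to the allocator.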
    fn unmap(&mut self, _region: &vmm::Region) -> Result<()> {
        Ok(())
    }

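    /// Currently a no-op: per-region permissions are not enforced here.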
    fn protect(&mut self, _region: &vmm::Region, _perms: vmm::Perms) -> Result<()> {
        Ok(())
    }

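    /// Translates a physical address back to its offset from `begin`.
    /// Returns `None` if `addr` lies below the start of this space.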
    fn phys_to_virt(&self, addr: PhysAddr) -> Option<VirtAddr> {
        addr.checked_sub(self.begin.as_usize())
            .map(|phys| VirtAddr::new(phys.as_usize()))
    }

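    /// Translates an offset-style virtual address to a physical one.
    /// Returns `None` on overflow.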
    fn virt_to_phys(&self, addr: VirtAddr) -> Option<PhysAddr> {
        self.begin.checked_add(addr.as_usize())
    }

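    /// One past the last mappable address. The unwrap cannot fail because
    /// `end` is never below `begin` by construction.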
    fn end(&self) -> VirtAddr {
        self.phys_to_virt(self.end).unwrap()
    }

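    /// No-op: there is no hardware context to switch for this flat space.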
    fn activate(&self) -> Result<()> {
        Ok(())
    }
}