Physical Memory Management Improvements.

Drew 2025-12-05 21:15:00 -08:00
parent e0d1f83d8a
commit d42494f124
4 changed files with 212 additions and 18 deletions


@@ -3,7 +3,8 @@ use crate::syscall;
 use crate::zion::ZError;
 use alloc::slice;
 use core::fmt::Debug;
-use core::ptr::{addr_of, addr_of_mut};
+use core::ops::Deref;
+use core::ptr::{addr_of, addr_of_mut, read_volatile, write_volatile, NonNull};

 #[cfg(feature = "hosted")]
 use linked_list_allocator::LockedHeap;
@@ -29,6 +30,7 @@ pub fn init_heap() {
 pub struct MemoryRegion {
     mem_cap: Capability,
     virt_addr: u64,
+    // TODO: This should probably be a usize.
     size: u64,
 }
@@ -94,13 +96,50 @@ impl MemoryRegion {
         }
     }

     pub fn zero_region(&self) {
         for i in self.mut_slice() {
             *i = 0;
         }
     }

+    pub fn raw_ptr_at_offset<T>(&self, offset: u64) -> *const T {
+        // TODO: Come up with a better safety check here. Checking against
+        // the size of T only works while T is Sized.
+        assert!(offset + size_of::<T>() as u64 <= self.size);
+        (self.virt_addr + offset) as *const T
+    }
+
+    pub fn mut_ptr_at_offset<T>(&self, offset: usize) -> *mut T {
+        assert!(offset + size_of::<T>() <= self.size as usize);
+        (self.virt_addr as usize + offset) as *mut T
+    }
+
+    /// Creates a shared reference from a given offset.
+    ///
+    /// SAFETY: The caller must ensure that the memory at this offset is not
+    /// mutated while the reference exists.
+    pub unsafe fn as_ref_at_offset<T>(&self, offset: usize) -> &T {
+        let ptr: *const T = self.raw_ptr_at_offset(offset as u64);
+        assert!(ptr.is_aligned(), "misaligned reference at offset {offset:#x}");
+        // SAFETY:
+        // - We checked alignment.
+        // - self.virt_addr + offset can't be null.
+        // - It is dereferenceable because it is entirely within this memory region.
+        &*ptr
+    }
+
+    /// Creates a mutable reference from a given offset.
+    ///
+    /// SAFETY: The caller must ensure that this is the only reference to the
+    /// memory at this offset.
+    pub unsafe fn as_mut_ref_at_offset<T>(&self, offset: usize) -> &mut T {
+        let ptr: *mut T = self.mut_ptr_at_offset(offset);
+        assert!(ptr.is_aligned(), "misaligned reference at offset {offset:#x}");
+        // SAFETY:
+        // - We checked alignment.
+        // - self.virt_addr + offset can't be null.
+        // - It is dereferenceable because it is entirely within this memory region.
+        &mut *ptr
+    }
+
     pub fn cap(&self) -> &Capability {
         &self.mem_cap
     }
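
A quick usage sketch of the new accessors; the `FramebufferInfo` layout and offset are invented for illustration and are not part of this commit:

// Hypothetical header stored at offset 0 of a mapped region.
#[repr(C)]
struct FramebufferInfo {
    width: u32,
    height: u32,
}

fn read_info(region: &MemoryRegion) -> (u32, u32) {
    // Obtaining the raw pointer is safe; dereferencing it is not.
    let _first_byte: *const u8 = region.raw_ptr_at_offset(0);
    // SAFETY: nothing mutates the region while `info` is alive, and
    // offset 0 is aligned for FramebufferInfo; the bounds assert in
    // raw_ptr_at_offset guarantees the struct fits inside the region.
    let info: &FramebufferInfo = unsafe { region.as_ref_at_offset(0) };
    (info.width, info.height)
}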
@@ -137,28 +176,22 @@ impl Drop for MemoryRegion {
     }
 }

-pub struct Volatile<T> {
-    /// TODO: This should maybe be MaybeUninit.
-    data: T,
-}
+#[repr(transparent)]
+pub struct Volatile<T: Copy>(T);

-impl<T> Volatile<T> {
-    pub fn read(&self) -> T
-    where
-        T: Copy,
-    {
-        unsafe { addr_of!(self.data).cast::<T>().read_volatile() }
+impl<T: Copy> Volatile<T> {
+    pub fn read(&self) -> T {
+        unsafe { read_volatile(addr_of!(self.0)) }
     }

     pub fn write(&mut self, data: T) {
         unsafe {
-            addr_of_mut!(self.data).cast::<T>().write_volatile(data);
+            write_volatile(addr_of_mut!(self.0), data);
         }
     }

     pub fn update<F>(&mut self, func: F)
     where
-        T: Copy,
         F: Fn(&mut T),
     {
         let mut data = self.read();
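
The rewritten wrapper keeps the same read/write/update surface. A minimal usage sketch against a memory-mapped register block; the `UartRegs` layout and bit values are hypothetical:

// Hypothetical MMIO register block accessed through Volatile fields.
#[repr(C)]
struct UartRegs {
    data: Volatile<u32>,
    status: Volatile<u32>,
}

fn send(regs: &mut UartRegs, byte: u32) {
    // Volatile accesses compile to real loads and stores that the
    // optimizer cannot elide or reorder relative to each other.
    while regs.status.read() & 0x1 == 0 {}
    regs.data.write(byte);
    regs.status.update(|s| *s |= 0x2);
}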
@@ -176,6 +209,37 @@ where
     }
 }

+#[macro_export]
+macro_rules! read_unaligned_volatile {
+    ($struct_ptr:expr, $field:ident) => {
+        unsafe {
+            let field_ptr = core::ptr::addr_of!((*$struct_ptr).$field);
+            core::ptr::read_volatile(field_ptr as *const _)
+        }
+    };
+}
+
+#[macro_export]
+macro_rules! write_unaligned_volatile {
+    ($struct_ptr:expr, $field:ident, $value:expr) => {
+        unsafe {
+            let field_ptr = core::ptr::addr_of_mut!((*$struct_ptr).$field);
+            core::ptr::write_volatile(field_ptr, $value);
+        }
+    };
+}
+
+#[macro_export]
+macro_rules! map_unaligned_volatile {
+    ($struct_ptr:expr, $field:ident, $func:expr) => {
+        unsafe {
+            let field_ptr = core::ptr::addr_of_mut!((*$struct_ptr).$field);
+            let value = core::ptr::read_volatile(field_ptr as *const _);
+            core::ptr::write_volatile(field_ptr, ($func)(value));
+        }
+    };
+}
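
A sketch of driving the macros from a raw struct pointer; `CmdBlock` and the flag values are invented here. One caveat: core::ptr::read_volatile and write_volatile still require the pointer they receive to be properly aligned, so these macros are only sound on fields that keep their natural alignment.

// Hypothetical command block poked through a raw pointer.
#[repr(C)]
struct CmdBlock {
    opcode: u16,
    flags: u16,
}

fn submit(block: *mut CmdBlock) {
    // Read, write, and read-modify-write one field at a time,
    // without ever constructing a reference to the struct.
    let op: u16 = read_unaligned_volatile!(block, opcode);
    write_unaligned_volatile!(block, flags, op | 0x1);
    map_unaligned_volatile!(block, flags, |f: u16| f | 0x2);
}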

 pub fn map_cap_and_leak(mem_cap: Capability) -> u64 {
     let vaddr = syscall::address_space_map(&mem_cap).unwrap();
     mem_cap.release();