diff --git a/rust/lib/mammoth/src/elf.rs b/rust/lib/mammoth/src/elf.rs
index 2b8638f..dd33876 100644
--- a/rust/lib/mammoth/src/elf.rs
+++ b/rust/lib/mammoth/src/elf.rs
@@ -239,9 +239,7 @@ fn load_program_segment(
 
     let mem_object = crate::mem::MemoryRegion::new(mem_size)?;
 
-    for i in mem_object.mut_slice() {
-        *i = 0;
-    }
+    mem_object.zero_region();
 
     let file_start = prog_header.offset as usize;
     let file_end = file_start + prog_header.file_size as usize;
diff --git a/rust/lib/mammoth/src/lib.rs b/rust/lib/mammoth/src/lib.rs
index 2058a6b..ce4f915 100644
--- a/rust/lib/mammoth/src/lib.rs
+++ b/rust/lib/mammoth/src/lib.rs
@@ -13,6 +13,7 @@ mod cap_syscall;
 pub mod elf;
 pub mod init;
 pub mod mem;
+pub mod physical_box;
 pub mod port;
 pub mod sync;
 pub mod syscall;
diff --git a/rust/lib/mammoth/src/mem.rs b/rust/lib/mammoth/src/mem.rs
index 8d5af40..7b81806 100644
--- a/rust/lib/mammoth/src/mem.rs
+++ b/rust/lib/mammoth/src/mem.rs
@@ -3,7 +3,8 @@ use crate::syscall;
 use crate::zion::ZError;
 use alloc::slice;
 use core::fmt::Debug;
-use core::ptr::{addr_of, addr_of_mut};
+use core::ops::Deref;
+use core::ptr::{addr_of, addr_of_mut, read_volatile, write_volatile, NonNull};
 
 #[cfg(feature = "hosted")]
 use linked_list_allocator::LockedHeap;
@@ -29,6 +30,7 @@ pub fn init_heap() {
 pub struct MemoryRegion {
     mem_cap: Capability,
     virt_addr: u64,
+    // TODO: This should be a usize probably.
    size: u64,
 }
 
@@ -94,13 +96,50 @@
         }
     }
 
+    pub fn zero_region(&self) {
+        for i in self.mut_slice() {
+            *i = 0;
+        }
+    }
+
     pub fn raw_ptr_at_offset<T>(&self, offset: u64) -> *const T {
-        // TODO: Come up with a better safety check here.
-        // We can't use the size of T because it might not be sized.
         assert!(offset + size_of::<T>() as u64 <= self.size);
         (self.virt_addr + offset) as *const T
     }
 
+    pub fn mut_ptr_at_offset<T>(&self, offset: usize) -> *mut T {
+        assert!(offset + size_of::<T>() <= self.size as usize);
+        (self.virt_addr as usize + offset) as *mut T
+    }
+
+    /// Creates a shared reference from a given offset.
+    ///
+    /// SAFETY: Caller must ensure that the memory pointed to by this
+    /// pointer does not get mutated while the reference exists.
+    pub unsafe fn as_ref_at_offset<T>(&self, offset: usize) -> &T {
+        let ptr: *const T = self.raw_ptr_at_offset(offset as u64);
+        assert!(ptr.is_aligned());
+        // SAFETY:
+        // - We checked alignment.
+        // - self.virt_addr + offset can't be null.
+        // - It is dereferenceable because it is entirely within this memory region.
+        &*self.raw_ptr_at_offset::<T>(offset as u64)
+    }
+
+    /// Creates a mutable reference from a given offset.
+    ///
+    /// SAFETY: Caller must ensure that this is the only reference to the memory
+    /// pointed to by this pointer.
+    pub unsafe fn as_mut_ref_at_offset<T>(&self, offset: usize) -> &mut T {
+        let ptr: *const T = self.raw_ptr_at_offset(offset as u64);
+        assert!(ptr.is_aligned());
+        // SAFETY:
+        // - We checked alignment.
+        // - self.virt_addr + offset can't be null.
+        // - It is dereferenceable because it is entirely within this memory region.
+        &mut *self.mut_ptr_at_offset::<T>(offset)
+    }
+
     pub fn cap(&self) -> &Capability {
         &self.mem_cap
     }
@@ -137,28 +176,22 @@
     }
 }
 
-pub struct Volatile<T> {
-    /// TODO: This should maybe be MaybeUninit.
-    data: T,
-}
+#[repr(transparent)]
+pub struct Volatile<T>(T);
 
-impl<T> Volatile<T> {
-    pub fn read(&self) -> T
-    where
-        T: Copy,
-    {
-        unsafe { addr_of!(self.data).cast::<T>().read_volatile() }
+impl<T: Copy> Volatile<T> {
+    pub fn read(&self) -> T {
+        unsafe { read_volatile(addr_of!(self.0)) }
     }
 
     pub fn write(&mut self, data: T) {
         unsafe {
-            addr_of_mut!(self.data).cast::<T>().write_volatile(data);
+            write_volatile(addr_of_mut!(self.0), data);
         }
     }
 
     pub fn update<F>(&mut self, func: F)
     where
-        T: Copy,
         F: Fn(&mut T),
     {
         let mut data = self.read();
@@ -176,6 +209,37 @@
     }
 }
 
+#[macro_export]
+macro_rules! read_unaligned_volatile {
+    ($struct_ptr:expr, $field:ident) => {
+        unsafe {
+            let field_ptr = core::ptr::addr_of!((*$struct_ptr).$field);
+            core::ptr::read_volatile(field_ptr as *const _)
+        }
+    };
+}
+
+#[macro_export]
+macro_rules! write_unaligned_volatile {
+    ($struct_ptr:expr, $field:ident, $value:expr) => {
+        unsafe {
+            let field_ptr = core::ptr::addr_of!((*$struct_ptr).$field);
+            core::ptr::write_volatile(field_ptr as *mut _, $value);
+        }
+    };
+}
+
+#[macro_export]
+macro_rules! map_unaligned_volatile {
+    ($struct_ptr:expr, $field:ident, $func:expr) => {
+        unsafe {
+            let field_ptr = core::ptr::addr_of!((*$struct_ptr).$field);
+            let value = core::ptr::read_volatile(field_ptr as *const _);
+            core::ptr::write_volatile(field_ptr as *mut _, ($func)(value));
+        }
+    };
+}
+
 pub fn map_cap_and_leak(mem_cap: Capability) -> u64 {
     let vaddr = syscall::address_space_map(&mem_cap).unwrap();
     mem_cap.release();
diff --git a/rust/lib/mammoth/src/physical_box.rs b/rust/lib/mammoth/src/physical_box.rs
new file mode 100644
index 0000000..692eed1
--- /dev/null
+++ b/rust/lib/mammoth/src/physical_box.rs
@@ -0,0 +1,131 @@
+use core::{
+    marker::PhantomData,
+    ops::{Deref, DerefMut, Index, IndexMut},
+    ptr::NonNull,
+};
+
+use alloc::{slice, vec::Vec};
+
+use crate::mem::MemoryRegion;
+
+pub struct PhysicalBox<T: ?Sized> {
+    data: NonNull<T>,
+    region: MemoryRegion,
+    physical_address: usize,
+    _marker: PhantomData<T>,
+}
+
+impl<T: ?Sized> PhysicalBox<T> {
+    pub fn physical_address(&self) -> usize {
+        self.physical_address
+    }
+}
+
+impl<T: ?Sized> Deref for PhysicalBox<T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        // SAFETY:
+        // - Alignment: This is page aligned.
+        // - Dereferenceable: Guaranteed in same allocation.
+        // - Aliasing: The borrow rules ensure this.
+        unsafe { self.data.as_ref() }
+    }
+}
+
+impl<T: ?Sized> DerefMut for PhysicalBox<T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        // SAFETY:
+        // - Alignment: This is page aligned.
+        // - Dereferenceable: Guaranteed in same allocation.
+        // - Aliasing: The borrow rules ensure this.
+        unsafe { self.data.as_mut() }
+    }
+}
+
+impl<T> PhysicalBox<[T]> {
+    pub fn default_with_count(default: T, len: usize) -> Self
+    where
+        T: Clone,
+    {
+        let layout = core::alloc::Layout::array::<T>(len).expect("Layout overflow");
+
+        // TODO: Implement a function like alloc that takes a layout.
+        let (memory_region, paddr) =
+            MemoryRegion::contiguous_physical(layout.size() as u64).expect("Failed to allocate");
+
+        let ptr: *mut T = memory_region.mut_ptr_at_offset(0);
+        for i in 0..len {
+            unsafe {
+                ptr.add(i).write(default.clone());
+            }
+        }
+
+        let slice_ptr = core::ptr::slice_from_raw_parts_mut(ptr, len);
+
+        Self {
+            // UNWRAP: We know this isn't null.
+            data: NonNull::new(slice_ptr).unwrap(),
+            region: memory_region,
+            physical_address: paddr as usize,
+            _marker: PhantomData,
+        }
+    }
+
+    pub fn from_vec(mut vec: Vec<T>) -> Self {
+        let len = vec.len();
+        let layout = core::alloc::Layout::array::<T>(len).expect("Layout overflow");
+
+        // TODO: Implement a function like alloc that takes a layout.
+        let (memory_region, paddr) =
+            MemoryRegion::contiguous_physical(layout.size() as u64).expect("Failed to allocate");
+
+        let ptr: *mut T = memory_region.mut_ptr_at_offset(0);
+        for (i, item) in vec.into_iter().enumerate() {
+            unsafe {
+                ptr.add(i).write(item);
+            }
+        }
+
+        let slice_ptr = core::ptr::slice_from_raw_parts_mut(ptr, len);
+
+        Self {
+            // UNWRAP: We know this isn't null.
+            data: NonNull::new(slice_ptr).unwrap(),
+            region: memory_region,
+            physical_address: paddr as usize,
+            _marker: PhantomData,
+        }
+    }
+    pub fn len(&self) -> usize {
+        (**self).len()
+    }
+}
+
+impl<T, I> Index<I> for PhysicalBox<[T]>
+where
+    I: slice::SliceIndex<[T]>,
+{
+    type Output = I::Output;
+
+    fn index(&self, index: I) -> &Self::Output {
+        &(**self)[index]
+    }
+}
+
+impl<T, I> IndexMut<I> for PhysicalBox<[T]>
+where
+    I: slice::SliceIndex<[T]>,
+{
+    fn index_mut(&mut self, index: I) -> &mut Self::Output {
+        &mut (**self)[index]
+    }
+}
+
+impl<T: ?Sized> Drop for PhysicalBox<T> {
+    fn drop(&mut self) {
+        // SAFETY:
+        // - We own this data.
+        unsafe { core::ptr::drop_in_place(self.data.as_ptr()) }
+    }
+}
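For orientation, here is a rough sketch of how the pieces introduced above might fit together in a driver, assuming the library is imported as the `mammoth` crate. The MMIO region argument, the 0x10 register offset, and the control-register semantics are made up for illustration; only `PhysicalBox`, `Volatile`, and `MemoryRegion::as_mut_ref_at_offset` come from the diff itself.

use mammoth::mem::{MemoryRegion, Volatile};
use mammoth::physical_box::PhysicalBox;

/// Hands a zeroed, physically contiguous DMA buffer to a device and starts it
/// via a hypothetical 32-bit control register at offset 0x10 of `mmio`.
fn start_device(mmio: &MemoryRegion) -> PhysicalBox<[u8]> {
    // The backing pages are physically contiguous, so the device can be given
    // `physical_address()` directly.
    let dma_buf: PhysicalBox<[u8]> = PhysicalBox::default_with_count(0u8, 4096);

    // SAFETY: assumes nothing else currently holds a reference to this register.
    let ctrl: &mut Volatile<u32> = unsafe { mmio.as_mut_ref_at_offset(0x10) };
    ctrl.write(dma_buf.physical_address() as u32);
    ctrl.update(|v| *v |= 1); // set a made-up "run" bit

    // Keep the buffer (and its backing mapping) alive while the device may use it.
    dma_buf
}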