use crate::cap::Capability;
use crate::syscall;
use crate::zion::ZError;
use alloc::slice;
use core::fmt::Debug;
use core::ops::Deref;
use core::ptr::{addr_of, addr_of_mut, read_volatile, write_volatile, NonNull};

#[cfg(feature = "hosted")]
use linked_list_allocator::LockedHeap;

#[cfg(feature = "hosted")]
#[global_allocator]
static ALLOCATOR: LockedHeap = LockedHeap::empty();

pub static mut CAN_ALLOC: bool = false;
#[cfg(feature = "hosted")]
pub fn init_heap() {
    // 1 MiB
    let size = 0x10_0000;
    let vmmo_cap = syscall::memory_object_create(size).expect("Failed to create memory object");
    let vaddr = syscall::address_space_map(&vmmo_cap).expect("Failed to map memory object");
    unsafe {
        ALLOCATOR.lock().init(vaddr as *mut u8, size as usize);
        CAN_ALLOC = true;
    }
}
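
// Usage sketch (not part of the original file): a hosted binary is expected to
// call `init_heap` once during startup, before anything allocates. The entry
// point below is hypothetical.
//
// #[cfg(feature = "hosted")]
// fn startup() {
//     init_heap();
//     // The global allocator is now backed by the 1 MiB mapping above.
//     let v: alloc::vec::Vec<u8> = alloc::vec![0u8; 16];
//     assert_eq!(v.len(), 16);
// }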

/// A memory object capability together with its mapping in the current
/// address space.
pub struct MemoryRegion {
    mem_cap: Capability,
    virt_addr: u64,
    // TODO: This should be a usize probably.
    size: u64,
}

impl MemoryRegion {
    pub fn direct_physical(paddr: u64, size: u64) -> Result<Self, ZError> {
        let mem_cap = syscall::memory_object_direct_physical(paddr, size)?;
        let virt_addr = syscall::address_space_map(&mem_cap)?;
        Ok(Self {
            mem_cap,
            virt_addr,
            size,
        })
    }

    pub fn contiguous_physical(size: u64) -> Result<(Self, u64), ZError> {
        let (mem_cap, paddr) = syscall::memory_object_contiguous_physical(size)?;
        let virt_addr = syscall::address_space_map(&mem_cap)?;
        Ok((
            Self {
                mem_cap,
                virt_addr,
                size,
            },
            paddr,
        ))
    }

    pub fn from_cap(mem_cap: Capability) -> Result<Self, ZError> {
        let virt_addr = syscall::address_space_map(&mem_cap)?;
        let size = syscall::memory_object_inspect(&mem_cap)?;
        Ok(Self {
            mem_cap,
            virt_addr,
            size,
        })
    }

    pub fn new(size: u64) -> Result<Self, ZError> {
        let mem_cap = syscall::memory_object_create(size)?;
        let virt_addr = syscall::address_space_map(&mem_cap)?;
        Ok(Self {
            mem_cap,
            virt_addr,
            size,
        })
    }

    pub fn slice<T>(&self) -> &[T] {
        unsafe {
            slice::from_raw_parts(
                self.virt_addr as *const T,
                self.size as usize / size_of::<T>(),
            )
        }
    }

    pub fn mut_slice<T>(&self) -> &mut [T] {
        unsafe {
            slice::from_raw_parts_mut(
                self.virt_addr as *mut T,
                self.size as usize / size_of::<T>(),
            )
        }
    }

    pub fn zero_region(&self) {
        for i in self.mut_slice() {
            *i = 0;
        }
    }

    pub fn raw_ptr_at_offset<T>(&self, offset: u64) -> *const T {
        assert!(offset + size_of::<T>() as u64 <= self.size);
        (self.virt_addr + offset) as *const T
    }

    pub fn mut_ptr_at_offset<T>(&self, offset: usize) -> *mut T {
        assert!(offset + size_of::<T>() <= self.size as usize);
        (self.virt_addr as usize + offset) as *mut T
    }

    /// Creates a reference from a given offset.
    ///
    /// SAFETY: Caller must ensure that the memory pointed to by this
    /// pointer does not get mutated while the reference exists.
    pub unsafe fn as_ref_at_offset<T>(&self, offset: usize) -> &T {
        let ptr: *const T = self.raw_ptr_at_offset(offset as u64);
        assert!(ptr.is_aligned());
        // SAFETY:
        // - We checked alignment.
        // - self.virt_addr + offset can't be null.
        // - It is dereferenceable because it is entirely within this memory region.
        &*self.raw_ptr_at_offset::<T>(offset as u64)
    }

    /// Creates a mutable reference from a given offset.
    ///
    /// SAFETY: Caller must ensure that this is the only reference to the memory
    /// pointed to by this pointer.
    pub unsafe fn as_mut_ref_at_offset<T>(&self, offset: usize) -> &mut T {
        let ptr: *const T = self.raw_ptr_at_offset(offset as u64);
        assert!(ptr.is_aligned());
        // SAFETY:
        // - We checked alignment.
        // - self.virt_addr + offset can't be null.
        // - It is dereferenceable because it is entirely within this memory region.
        &mut *self.mut_ptr_at_offset::<T>(offset)
    }

    pub fn cap(&self) -> &Capability {
        &self.mem_cap
    }

    pub fn size(&self) -> u64 {
        self.size
    }

    pub fn duplicate(&self, offset: u64, length: u64) -> Result<Capability, ZError> {
        syscall::memory_obj_duplicate(&self.mem_cap, offset, length)
    }
}
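
// Usage sketch (not part of the original file): create an anonymous region,
// zero it, and view it as a typed slice. The function name is hypothetical.
//
// fn example_region_use() -> Result<(), ZError> {
//     let region = MemoryRegion::new(0x1000)?;
//     region.zero_region();
//     let words = region.mut_slice::<u32>();
//     words[0] = 0xDEAD_BEEF;
//     assert_eq!(region.slice::<u32>()[0], 0xDEAD_BEEF);
//     // Hand a sub-range of the backing memory object to another process.
//     let _cap = region.duplicate(0, 0x1000)?;
//     Ok(())
// }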

impl<T> AsRef<T> for MemoryRegion {
    fn as_ref(&self) -> &T {
        unsafe { (self.virt_addr as *const T).as_ref().unwrap() }
    }
}

impl<T> AsMut<T> for MemoryRegion {
    fn as_mut(&mut self) -> &mut T {
        unsafe { (self.virt_addr as *mut T).as_mut().unwrap() }
    }
}

impl Drop for MemoryRegion {
    fn drop(&mut self) {
        // FIXME: We shouldn't have to do this manual adjustment.
        // Round the end of the mapping up to the next page boundary before unmapping.
        let mut max = self.virt_addr + self.size;
        if (max & 0xFFF) != 0 {
            max += 0x1000 - (max & 0xFFF);
        }
        syscall::address_space_unmap(self.virt_addr, max).expect("Failed to unmap memory");
    }
}
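
// Usage sketch (not part of the original file): the blanket AsRef/AsMut impls
// let a whole region be viewed as a single #[repr(C)] structure. The struct
// and function below are hypothetical.
//
// #[repr(C)]
// struct FramebufferInfo {
//     width: u32,
//     height: u32,
//     pitch: u32,
// }
//
// fn example_region_as_struct(region: &MemoryRegion) -> u32 {
//     let info: &FramebufferInfo = region.as_ref();
//     info.width * info.height
// }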

/// Wraps a value so that every access goes through a volatile read or write.
#[repr(transparent)]
pub struct Volatile<T: Copy>(T);

impl<T: Copy> Volatile<T> {
    pub fn read(&self) -> T {
        unsafe { read_volatile(addr_of!(self.0)) }
    }

    pub fn write(&mut self, data: T) {
        unsafe {
            write_volatile(addr_of_mut!(self.0), data);
        }
    }

    pub fn update<F>(&mut self, func: F)
    where
        F: Fn(&mut T),
    {
        let mut data = self.read();
        func(&mut data);
        self.write(data);
    }
}

impl<T> Debug for Volatile<T>
where
    T: Debug + Copy,
{
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        write!(f, "{:?}", self.read())
    }
}
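
// Usage sketch (not part of the original file): `Volatile` is intended for
// memory-mapped registers, e.g. inside a region created with
// `MemoryRegion::direct_physical`. The register layout and addresses below are
// hypothetical.
//
// #[repr(C)]
// struct UartRegs {
//     data: Volatile<u32>,
//     status: Volatile<u32>,
// }
//
// fn example_mmio() -> Result<(), ZError> {
//     let mmio = MemoryRegion::direct_physical(0xFE20_0000, 0x1000)?;
//     // SAFETY: this is the only reference into the register block.
//     let regs: &mut UartRegs = unsafe { mmio.as_mut_ref_at_offset(0) };
//     while regs.status.read() & 0x1 == 0 {}
//     regs.data.write(b'A' as u32);
//     Ok(())
// }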

/// Reads `$field` of the struct behind `$struct_ptr` with a volatile load,
/// going through `addr_of!` so no intermediate reference to the field is created.
#[macro_export]
macro_rules! read_unaligned_volatile {
    ($struct_ptr:expr, $field:ident) => {
        unsafe {
            let field_ptr = core::ptr::addr_of!((*$struct_ptr).$field);
            core::ptr::read_volatile(field_ptr as *const _)
        }
    };
}

/// Writes `$value` to `$field` of the struct behind `$struct_ptr` with a
/// volatile store, without creating an intermediate reference to the field.
#[macro_export]
macro_rules! write_unaligned_volatile {
    ($struct_ptr:expr, $field:ident, $value:expr) => {
        unsafe {
            let field_ptr = core::ptr::addr_of!((*$struct_ptr).$field);
            core::ptr::write_volatile(field_ptr as *mut _, $value);
        }
    };
}

/// Reads `$field` with a volatile load, applies `$func` to the value, and
/// writes the result back with a volatile store.
#[macro_export]
macro_rules! map_unaligned_volatile {
    ($struct_ptr:expr, $field:ident, $func:expr) => {
        unsafe {
            let field_ptr = core::ptr::addr_of!((*$struct_ptr).$field);
            let value = core::ptr::read_volatile(field_ptr as *const _);
            core::ptr::write_volatile(field_ptr as *mut _, ($func)(value));
        }
    };
}
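
// Usage sketch (not part of the original file): the macros above access struct
// fields through raw pointers via `addr_of!`, so they can be used on
// `#[repr(C, packed)]` descriptors where taking a normal reference to a field
// is not allowed. The descriptor layout below is hypothetical, and the field
// offsets are assumed to satisfy the alignment that `read_volatile` and
// `write_volatile` require.
//
// #[repr(C, packed)]
// struct XhciTrb {
//     parameter: u64,
//     status: u32,
//     control: u32,
// }
//
// fn example_trb(trb: *mut XhciTrb) {
//     let status: u32 = read_unaligned_volatile!(trb, status);
//     write_unaligned_volatile!(trb, control, status | 0x1);
//     map_unaligned_volatile!(trb, status, |s: u32| s & !0x1);
// }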

/// Maps the given memory object and leaks the capability; the mapping stays
/// alive for the life of the process. Returns the virtual address.
pub fn map_cap_and_leak(mem_cap: Capability) -> u64 {
    let vaddr = syscall::address_space_map(&mem_cap).unwrap();
    mem_cap.release();
    vaddr
}

/// Maps the given physical range and leaks the capability. Returns the
/// virtual address of the mapping.
pub fn map_direct_physical_and_leak(paddr: u64, size: u64) -> u64 {
    let mem_cap = syscall::memory_object_direct_physical(paddr, size).unwrap();
    let vaddr = syscall::address_space_map(&mem_cap).unwrap();
    mem_cap.release();
    vaddr
}

/// Allocates a contiguous physical buffer, maps it, and leaks the capability.
/// Returns the (virtual, physical) addresses of the buffer.
pub fn map_physical_and_leak(size: u64) -> (u64, u64) {
    let (mem_cap, paddr) = syscall::memory_object_contiguous_physical(size).unwrap();
    let vaddr = syscall::address_space_map(&mem_cap).unwrap();
    mem_cap.release();
    (vaddr, paddr)
}
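
// Usage sketch (not part of the original file): the `*_and_leak` helpers are
// for mappings that must stay valid for the whole life of the process, such as
// a DMA buffer shared with a device. The function name below is hypothetical.
//
// fn example_leaked_dma_buffer() -> (u64, u64) {
//     // (virtual address, physical address) of a buffer that is never unmapped.
//     map_physical_and_leak(0x4000)
// }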