Rust XHCI Implementation.

This commit is contained in:
Drew 2025-12-05 22:01:13 -08:00
parent 8b022a6b24
commit 7bbbf79bd6
26 changed files with 1901 additions and 209 deletions

View file

@ -3,7 +3,6 @@ use crate::syscall;
use crate::zion::ZError;
use alloc::slice;
use core::fmt::Debug;
use core::ops::Deref;
use core::ptr::{addr_of, addr_of_mut, read_volatile, write_volatile, NonNull};
#[cfg(feature = "hosted")]
@ -78,6 +77,10 @@ impl MemoryRegion {
})
}
/// Returns the region's mapped virtual address (`virt_addr`) as a `usize`.
pub fn vaddr(&self) -> usize {
self.virt_addr as usize
}
pub fn slice<T>(&self) -> &[T] {
unsafe {
slice::from_raw_parts(
@ -96,7 +99,7 @@ impl MemoryRegion {
}
}
pub fn zero_region(&self) {
pub fn zero_region(&mut self) {
for i in self.mut_slice() {
*i = 0;
}
@ -114,8 +117,9 @@ impl MemoryRegion {
/// Creates a reference from a given offset.
///
/// SAFETY: Caller must ensure that the memory pointed to by this
/// pointer must not get mutated while the reference exists.
/// # Safety
/// - Caller must ensure that the memory pointed to by this
/// pointer is not mutated while the reference exists.
pub unsafe fn as_ref_at_offset<T>(&self, offset: usize) -> &T {
let ptr: *const T = self.raw_ptr_at_offset(offset as u64);
assert!(ptr.is_aligned(), "");
@ -128,9 +132,10 @@ impl MemoryRegion {
/// Creates a reference from a given offset.
///
/// SAFETY: Caller must ensure that this is the only reference to the memory pointed
/// to by this pointer.
pub unsafe fn as_mut_ref_at_offset<T>(&self, offset: usize) -> &mut T {
/// # Safety
/// - Caller must ensure that this is the only reference to the memory pointed
/// to by this pointer.
pub unsafe fn as_mut_ref_at_offset<T>(&mut self, offset: usize) -> &mut T {
let ptr: *const T = self.raw_ptr_at_offset(offset as u64);
assert!(ptr.is_aligned(), "");
// SAFETY:
@ -246,11 +251,12 @@ pub fn map_cap_and_leak(mem_cap: Capability) -> u64 {
vaddr
}
pub fn map_direct_physical_and_leak(paddr: u64, size: u64) -> u64 {
let mem_cap = syscall::memory_object_direct_physical(paddr, size).unwrap();
pub fn map_direct_physical_and_leak<T>(paddr: usize, size: usize) -> NonNull<T> {
let mem_cap = syscall::memory_object_direct_physical(paddr as u64, size as u64).unwrap();
let vaddr = syscall::address_space_map(&mem_cap).unwrap();
mem_cap.release();
vaddr
// UNWRAP: The kernel guarantees this is valid.
NonNull::new(vaddr as *mut T).unwrap()
}
pub fn map_physical_and_leak(size: u64) -> (u64, u64) {

View file

@ -4,17 +4,34 @@ use core::{
ptr::NonNull,
};
use alloc::{slice, vec::Vec};
use alloc::{boxed::Box, slice, vec::Vec};
use crate::mem::MemoryRegion;
/// An owned value placed in a physically-contiguous `MemoryRegion`, exposing
/// the backing physical address — presumably so buffers can be handed to
/// hardware (this lands in an XHCI commit); confirm against callers.
pub struct PhysicalBox<T: ?Sized> {
// Pointer to the value living inside `region`'s mapping.
data: NonNull<T>,
// Held only to keep the mapping alive for as long as `data` is dereferenced.
#[allow(dead_code)]
region: MemoryRegion,
// Physical address of the allocation, captured at construction time.
physical_address: usize,
_marker: PhantomData<T>,
}
impl<T> PhysicalBox<T> {
/// Allocates a physically-contiguous region of `size_of::<T>()` bytes,
/// moves `data` into it, and records the region's physical address.
///
/// # Panics
/// Panics if the contiguous-physical allocation fails.
///
/// NOTE(review): assumes `mut_ptr_at_offset(0)` is suitably aligned for `T`
/// — confirm the alignment guarantee of `MemoryRegion::contiguous_physical`.
pub fn new(data: T) -> Self {
let (memory_region, paddr) =
MemoryRegion::contiguous_physical(size_of::<T>() as u64).expect("Failed to allocate");
// UNWRAP: We know this isn't null.
let ptr = NonNull::new(memory_region.mut_ptr_at_offset(0)).unwrap();
// SAFETY: the region was just allocated with room for one `T` and we hold
// the only pointer into it; `write` does not drop any previous value.
unsafe { ptr.write(data) };
Self {
data: ptr,
region: memory_region,
physical_address: paddr as usize,
_marker: PhantomData,
}
}
}
impl<T: ?Sized> PhysicalBox<T> {
pub fn physical_address(&self) -> usize {
self.physical_address
@ -50,7 +67,7 @@ impl<T> PhysicalBox<[T]> {
{
let layout = core::alloc::Layout::array::<T>(len).expect("Layout overflow");
// TODO: Implement a function like alloc that takes a layout. let (memory_region, paddr) =
// TODO: Implement a function like alloc that takes a layout.
let (memory_region, paddr) =
MemoryRegion::contiguous_physical(layout.size() as u64).expect("Failed to allocate");
@ -72,7 +89,7 @@ impl<T> PhysicalBox<[T]> {
}
}
pub fn from_vec(mut vec: Vec<T>) -> Self {
pub fn from_vec(vec: Vec<T>) -> Self {
let len = vec.len();
let layout = core::alloc::Layout::array::<T>(len).expect("Layout overflow");
@ -100,6 +117,10 @@ impl<T> PhysicalBox<[T]> {
/// Number of elements in the boxed slice (delegates through `Deref`).
pub fn len(&self) -> usize {
(**self).len()
}
/// Returns `true` if the boxed slice contains no elements.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
impl<I, T> Index<I> for PhysicalBox<[T]>
@ -122,6 +143,13 @@ where
}
}
/// SAFETY: `PhysicalBox` is the sole owner of the pointer into its region, so
/// it may move between threads exactly when the equivalent owning `Box<T>`
/// may — hence the `Box<T>: Send` bound.
unsafe impl<T: ?Sized> Send for PhysicalBox<T> where Box<T>: Send {}
/// SAFETY: Mutating the pointed-to data requires a mutable reference to this
/// type, so sharing `&PhysicalBox<T>` is as safe as sharing `&Box<T>` — hence
/// the `Box<T>: Sync` bound.
unsafe impl<T: ?Sized> Sync for PhysicalBox<T> where Box<T>: Sync {}
impl<T: ?Sized> Drop for PhysicalBox<T> {
fn drop(&mut self) {
// SAFETY:

View file

@ -122,6 +122,7 @@ impl Executor {
}
}
#[derive(Clone)]
pub struct Spawner {
tasks: Arc<Mutex<BTreeMap<TaskId, Task>>>,
task_queue: Arc<Mutex<VecDeque<TaskId>>>,

View file

@ -71,15 +71,15 @@ impl PciDevice {
control.capable_address_64(),
"We don't handle the non-64bit case for MSI yet."
);
assert!(
control.multi_message_capable() == 0,
"We don't yet handle multi-message capable devices."
);
if control.multi_message_capable() != 0 {
mammoth::debug!("WARN: We don't yet handle multi-message capable devices.");
}
// FIXME: These probably need to be volatile writes.
let header: &mut PciDeviceHeader = self.memory_region.as_mut();
header.command = header.command.with_interrupt_disable(true);
msi_cap.msi_control = control.with_msi_enable(true);
msi_cap.msi_control = control.with_msi_enable(true).with_multi_message_enable(0);
// For setting addr and data field, see intel ref
// Vol 3. Section 11.11