Rust XHCI Implementation.

Drew 2025-12-05 22:01:13 -08:00
parent 8b022a6b24
commit 592b5b468f
20 changed files with 1064 additions and 205 deletions

rust/Cargo.lock (generated; 44 lines changed)

@@ -2,12 +2,6 @@
# It is not intended for manual editing.
version = 4
[[package]]
name = "autocfg"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
[[package]]
name = "bitfield-struct"
version = "0.8.0"
@@ -80,11 +74,10 @@ dependencies = [
[[package]]
name = "lock_api"
version = "0.4.12"
version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965"
dependencies = [
"autocfg",
"scopeguard",
]
@@ -105,9 +98,9 @@ dependencies = [
[[package]]
name = "prettyplease"
version = "0.2.20"
version = "0.2.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e"
checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b"
dependencies = [
"proc-macro2",
"syn",
@@ -115,18 +108,18 @@ dependencies = [
[[package]]
name = "proc-macro2"
version = "1.0.86"
version = "1.0.103"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.36"
version = "1.0.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7"
checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f"
dependencies = [
"proc-macro2",
]
@@ -148,9 +141,9 @@ dependencies = [
[[package]]
name = "syn"
version = "2.0.72"
version = "2.0.111"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af"
checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87"
dependencies = [
"proc-macro2",
"quote",
@@ -178,15 +171,15 @@ dependencies = [
[[package]]
name = "unicode-ident"
version = "1.0.12"
version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5"
[[package]]
name = "unicode-segmentation"
version = "1.11.0"
version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202"
checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493"
[[package]]
name = "victoriafalls"
@@ -200,12 +193,21 @@ dependencies = [
"yunqc",
]
[[package]]
name = "volatile"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af8ca9a5d4debca0633e697c88269395493cebf2e10db21ca2dbde37c1356452"
[[package]]
name = "voyageurs"
version = "0.1.0"
dependencies = [
"bitfield-struct 0.12.1",
"mammoth",
"pci",
"volatile",
"yellowstone-yunq",
]
[[package]]


@@ -3,7 +3,6 @@ use crate::syscall;
use crate::zion::ZError;
use alloc::slice;
use core::fmt::Debug;
use core::ops::Deref;
use core::ptr::{addr_of, addr_of_mut, read_volatile, write_volatile, NonNull};
#[cfg(feature = "hosted")]
@@ -78,6 +77,10 @@ impl MemoryRegion {
})
}
pub fn vaddr(&self) -> usize {
self.virt_addr as usize
}
pub fn slice<T>(&self) -> &[T] {
unsafe {
slice::from_raw_parts(
@@ -96,7 +99,7 @@ impl MemoryRegion {
}
}
pub fn zero_region(&self) {
pub fn zero_region(&mut self) {
for i in self.mut_slice() {
*i = 0;
}
@@ -114,8 +117,9 @@ impl MemoryRegion {
/// Creates a reference from a given offset.
///
/// SAFETY: Caller must ensure that the memory pointed to by this
/// pointer must not get mutated while the reference exists.
/// # Safety
/// - Caller must ensure that the memory pointed to by this
///   pointer is not mutated while the reference exists.
pub unsafe fn as_ref_at_offset<T>(&self, offset: usize) -> &T {
let ptr: *const T = self.raw_ptr_at_offset(offset as u64);
assert!(ptr.is_aligned(), "");
@@ -128,9 +132,10 @@ impl MemoryRegion {
/// Creates a reference from a given offset.
///
/// SAFETY: Caller must ensure that this is the only reference to the memory pointed
/// to by this pointer.
pub unsafe fn as_mut_ref_at_offset<T>(&self, offset: usize) -> &mut T {
/// # Safety
/// - Caller must ensure that this is the only reference to the memory pointed
/// to by this pointer.
pub unsafe fn as_mut_ref_at_offset<T>(&mut self, offset: usize) -> &mut T {
let ptr: *const T = self.raw_ptr_at_offset(offset as u64);
assert!(ptr.is_aligned(), "");
// SAFETY:
@@ -246,11 +251,12 @@ pub fn map_cap_and_leak(mem_cap: Capability) -> u64 {
vaddr
}
pub fn map_direct_physical_and_leak(paddr: u64, size: u64) -> u64 {
let mem_cap = syscall::memory_object_direct_physical(paddr, size).unwrap();
pub fn map_direct_physical_and_leak<T>(paddr: usize, size: usize) -> NonNull<T> {
let mem_cap = syscall::memory_object_direct_physical(paddr as u64, size as u64).unwrap();
let vaddr = syscall::address_space_map(&mem_cap).unwrap();
mem_cap.release();
vaddr
// UNWRAP: The kernel guarantees this is valid.
NonNull::new(vaddr as *mut T).unwrap()
}
pub fn map_physical_and_leak(size: u64) -> (u64, u64) {
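
With the new signature, map_direct_physical_and_leak hands back a typed NonNull instead of a raw address, which removes a cast at every MMIO call site (see the AHCI change below). A minimal usage sketch; the register block and physical address here are hypothetical:

#[repr(C)]
struct HypotheticalRegs {
    status: u32,
    control: u32,
}

fn map_device_regs() -> &'static mut HypotheticalRegs {
    // Map one page of MMIO at a made-up BAR address. The capability is
    // released and the mapping leaked, so the pointer stays valid forever.
    let regs = mammoth::mem::map_direct_physical_and_leak::<HypotheticalRegs>(0xfebf_0000, 0x1000);
    // SAFETY: the region was just mapped for this process's exclusive use
    // and is never unmapped.
    unsafe { regs.as_ptr().as_mut().unwrap() }
}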


@@ -4,17 +4,34 @@ use core::{
ptr::NonNull,
};
use alloc::{slice, vec::Vec};
use alloc::{boxed::Box, slice, vec::Vec};
use crate::mem::MemoryRegion;
pub struct PhysicalBox<T: ?Sized> {
data: NonNull<T>,
#[allow(dead_code)]
region: MemoryRegion,
physical_address: usize,
_marker: PhantomData<T>,
}
impl<T> PhysicalBox<T> {
pub fn new(data: T) -> Self {
let (memory_region, paddr) =
MemoryRegion::contiguous_physical(size_of::<T>() as u64).expect("Failed to allocate");
// UNWRAP: We know this isn't null.
let ptr = NonNull::new(memory_region.mut_ptr_at_offset(0)).unwrap();
unsafe { ptr.write(data) };
Self {
data: ptr,
region: memory_region,
physical_address: paddr as usize,
_marker: PhantomData,
}
}
}
impl<T: ?Sized> PhysicalBox<T> {
pub fn physical_address(&self) -> usize {
self.physical_address
@@ -50,7 +67,7 @@ impl<T> PhysicalBox<[T]> {
{
let layout = core::alloc::Layout::array::<T>(len).expect("Layout overflow");
// TODO: Implement a function like alloc that takes a layout. let (memory_region, paddr) =
// TODO: Implement a function like alloc that takes a layout.
let (memory_region, paddr) =
MemoryRegion::contiguous_physical(layout.size() as u64).expect("Failed to allocate");
@@ -72,7 +89,7 @@ impl<T> PhysicalBox<[T]> {
}
}
pub fn from_vec(mut vec: Vec<T>) -> Self {
pub fn from_vec(vec: Vec<T>) -> Self {
let len = vec.len();
let layout = core::alloc::Layout::array::<T>(len).expect("Layout overflow");
@@ -100,6 +117,10 @@ impl<T> PhysicalBox<[T]> {
pub fn len(&self) -> usize {
(**self).len()
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
impl<I, T> Index<I> for PhysicalBox<[T]>
@@ -122,6 +143,13 @@ where
}
}
/// SAFETY: We are the only owner of this pointer.
unsafe impl<T: ?Sized> Send for PhysicalBox<T> where Box<T>: Send {}
/// SAFETY: You must have a mutable reference to this
/// type to modify the data at the pointer.
unsafe impl<T: ?Sized> Sync for PhysicalBox<T> where Box<T>: Sync {}
impl<T: ?Sized> Drop for PhysicalBox<T> {
fn drop(&mut self) {
// SAFETY:
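
PhysicalBox couples Box-style ownership with a stable physical address, which is exactly what the XHCI context structures later in this commit need for DMA. A small illustrative sketch built only on the constructors shown above; the hardware handoff is left hypothetical:

use mammoth::physical_box::PhysicalBox;

fn build_dma_table() -> PhysicalBox<[u64]> {
    // 256 zeroed entries in physically contiguous memory.
    let table = PhysicalBox::default_with_count(0u64, 256);
    // The address stays valid until the box is dropped, so it can be handed
    // straight to a DMA-capable device register.
    let _table_paddr = table.physical_address() as u64;
    table
}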


@@ -122,6 +122,7 @@ impl Executor {
}
}
#[derive(Clone)]
pub struct Spawner {
tasks: Arc<Mutex<BTreeMap<TaskId, Task>>>,
task_queue: Arc<Mutex<VecDeque<TaskId>>>,
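
Both fields of Spawner are Arcs, so the new Clone derive is a cheap reference-count bump; it is what lets the XHCI interrupt thread hand work back to the executor. A sketch of the resulting pattern, mirroring main.rs later in this commit:

use mammoth::{sync::Mutex, task::{Executor, Task}};

fn spawn_from_interrupt_thread(executor: &Mutex<Executor>) {
    let spawner = executor.lock().new_spawner();
    mammoth::thread::spawn(move || {
        // The moved Spawner shares the executor's task queue through its Arcs.
        spawner.spawn(Task::new(async { /* react to a hardware event */ }));
    });
}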


@@ -71,15 +71,15 @@ impl PciDevice {
control.capable_address_64(),
"We don't handle the non-64bit case for MSI yet."
);
assert!(
control.multi_message_capable() == 0,
"We don't yet handle multi-message capable devices."
);
if control.multi_message_capable() != 0 {
mammoth::debug!("WARN: We don't yet handle multi-message capable devices.");
}
// FIXME: These probably need to be volatile writes.
let header: &mut PciDeviceHeader = self.memory_region.as_mut();
header.command = header.command.with_interrupt_disable(true);
msi_cap.msi_control = control.with_msi_enable(true);
msi_cap.msi_control = control.with_msi_enable(true).with_multi_message_enable(0);
// For setting the address and data fields, see the Intel SDM,
// Vol. 3, Section 11.11.
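
For reference, the layout that comment points to: the message address fixes bits 31:20 at 0xFEE and carries the destination APIC ID in bits 19:12, while the message data carries the vector in its low byte. A simplified sketch of the encoding, assuming fixed delivery mode, physical destination, and edge triggering:

/// Encode an MSI address/data pair per Intel SDM Vol. 3, Section 11.11.
fn msi_address_data(destination_apic_id: u8, vector: u8) -> (u64, u16) {
    let address = 0xFEE0_0000u64 | ((destination_apic_id as u64) << 12);
    // Delivery mode 000 (fixed) and edge trigger leave the other bits clear.
    let data = vector as u16;
    (address, data)
}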


@@ -1,3 +1,5 @@
use core::ffi::c_void;
use alloc::sync::Arc;
use mammoth::{
cap::Capability,
@@ -26,8 +28,11 @@ impl AhciController {
pub fn new(pci_memory: Capability) -> Self {
let pci_device = PciDevice::from_cap(pci_memory).unwrap();
let hba_vaddr =
mem::map_direct_physical_and_leak(pci_device.header().bars[5] as u64, 0x1100);
let hba_vaddr = mem::map_direct_physical_and_leak::<c_void>(
pci_device.header().bars[5] as usize,
0x1100,
)
.as_ptr() as u64;
let hba = unsafe { (hba_vaddr as *mut AhciHba).as_mut().unwrap() };
let mut controller = Self {
pci_device: Mutex::new(pci_device),


@@ -6,3 +6,10 @@ edition = "2024"
[dependencies]
bitfield-struct = "0.12"
mammoth = { path = "../../lib/mammoth/" }
pci = { path = "../../lib/pci" }
volatile = "0.6.1"
yellowstone-yunq = { version = "0.1.0", path = "../../lib/yellowstone" }
[features]
default = ["debug"]
debug = []


@@ -5,12 +5,47 @@ extern crate alloc;
mod xhci;
use mammoth::{debug, define_entry, zion::z_err_t};
use alloc::sync::Arc;
use mammoth::{
cap::Capability,
debug, define_entry,
sync::Mutex,
task::{Executor, Task},
zion::z_err_t,
};
use pci::PciDevice;
use xhci::driver::XHCIDriver;
define_entry!();
#[unsafe(no_mangle)]
extern "C" fn main() -> z_err_t {
debug!("In Voyageurs");
#[cfg(feature = "debug")]
debug!("Voyageurs Starting.");
let yellowstone = yellowstone_yunq::from_init_endpoint();
let xhci_info = yellowstone
.get_xhci_info()
.expect("Failed to get XHCI info from yellowstone.");
let pci_device = PciDevice::from_cap(Capability::take(xhci_info.xhci_region)).unwrap();
let xhci_driver = Arc::new(XHCIDriver::from_pci_device(pci_device));
let executor = Arc::new(Mutex::new(Executor::new()));
let driver_clone = xhci_driver.clone();
let spawner = executor.clone().lock().new_spawner();
let interrupt_thread = mammoth::thread::spawn(move || driver_clone.interrupt_loop(spawner));
executor
.clone()
.lock()
.spawn(Task::new(async move { xhci_driver.startup().await }));
executor.clone().lock().run();
interrupt_thread.join().unwrap();
0
}


@@ -0,0 +1,71 @@
use bitfield_struct::bitfield;
use crate::xhci::data_structures::{TransferRequestBlock, TrbType};
#[bitfield(u128)]
pub struct EnableSlotCommand {
__: u64,
__: u32,
#[bits(10)]
__: u16,
#[bits(6, default=TrbType::EnableSlotCommand)]
trb_type: TrbType,
#[bits(5)]
slot_type: u8,
#[bits(11)]
__: u16,
}
impl From<EnableSlotCommand> for CommandTrb {
fn from(value: EnableSlotCommand) -> Self {
Self(value.into_bits().into())
}
}
#[bitfield(u128)]
pub struct AddressDeviceCommand {
pub input_context_pointer: u64,
__: u32,
#[bits(9)]
__: u16,
pub block_set_address_request: bool,
#[bits(6, default=TrbType::AddressDeviceCommand)]
trb_type: TrbType,
__: u8,
pub slot_id: u8,
}
impl From<AddressDeviceCommand> for CommandTrb {
fn from(value: AddressDeviceCommand) -> Self {
Self(value.into_bits().into())
}
}
#[bitfield(u128)]
pub struct NoOpCommand {
__: u64,
__: u32,
cycle: bool,
#[bits(9)]
__: u16,
#[bits(6, default = TrbType::NoOpCommand)]
trb_type: TrbType,
__: u16,
}
impl From<NoOpCommand> for CommandTrb {
fn from(value: NoOpCommand) -> Self {
Self(value.into_bits().into())
}
}
/// Simple type to ensure we are only sending commands to command rings.
pub struct CommandTrb(TransferRequestBlock);
impl From<CommandTrb> for TransferRequestBlock {
fn from(value: CommandTrb) -> Self {
value.0
}
}
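
Because CommandTrb can only be produced through these From impls, CommandRing::enqueue_command (added later in this commit) cannot accept event or transfer TRBs by construction. A short sketch of how the builders compose, mirroring driver.rs:

fn address_device_trb(slot_id: u8, input_context_paddr: u64) -> CommandTrb {
    // The TRB type is filled in by the bitfield default; only per-command
    // fields need to be set explicitly.
    AddressDeviceCommand::new()
        .with_slot_id(slot_id)
        .with_input_context_pointer(input_context_paddr)
        .into()
}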


@@ -0,0 +1,139 @@
use bitfield_struct::{bitenum, bitfield};
use crate::xhci::data_structures::{TransferRequestBlock, TrbType};
#[bitenum]
#[repr(u8)]
#[derive(Debug, Eq, PartialEq)]
pub enum CommandCompletionCode {
#[fallback]
#[allow(dead_code)]
Invalid = 0,
Success = 1,
}
#[bitfield(u128)]
pub struct TransferEvent {
pub transfer_trb_pointer: u64,
#[bits(24)]
pub trb_transfer_length: u32,
/// Completion Code. This field encodes the completion status of the command that generated the
/// event. Refer to the respective command definition for a list of the possible Completion Codes
/// associated with the command. Refer to section 6.4.5 for an enumerated list of possible error
/// conditions.
#[bits(8)]
pub completion_code: CommandCompletionCode,
#[bits(10)]
__: u16,
/// TRB Type. This field identifies the type of the TRB. Refer to Table 6-91 for the definition of the
/// Command Completion Event TRB type ID
#[bits(6, default=TrbType::TransferEvent)]
pub trb_type: TrbType,
#[bits(5)]
pub endpoint_id: u8,
#[bits(3)]
__: u8,
pub slot_id: u8,
}
#[bitfield(u128)]
pub struct CommandCompletionEvent {
/// Command TRB Pointer Hi and Lo. This field represents the high order bits of the 64-bit address
/// of the Command TRB that generated this event. Note that this field is not valid for some
/// Completion Code values. Refer to Table 6-90 for specific cases.
///
/// The memory structure referenced by this physical memory pointer shall be aligned on a 16-byte
/// address boundary.
pub command_trb_pointer: u64,
/// Command Completion Parameter. This field may optionally be set by a command. Refer to
/// section 4.6.6.1 for specific usage. If a command does not utilize this field it shall be treated as
/// RsvdZ.
#[bits(24)]
pub command_completion_parameter: u64,
/// Completion Code. This field encodes the completion status of the command that generated the
/// event. Refer to the respective command definition for a list of the possible Completion Codes
/// associated with the command. Refer to section 6.4.5 for an enumerated list of possible error
/// conditions.
#[bits(8)]
pub completion_code: CommandCompletionCode,
/// Cycle bit (C). This bit is used to mark the Dequeue Pointer of an Event Ring
pub cycle_bit: bool,
#[bits(9)]
__: u16,
/// TRB Type. This field identifies the type of the TRB. Refer to Table 6-91 for the definition of the
/// Command Completion Event TRB type ID
#[bits(6, default=TrbType::CommandCompletionEvent)]
pub trb_type: TrbType,
/// VF ID. The ID of the Virtual Function that generated the event. Note that this field is valid only if
/// Virtual Functions are enabled. If they are not enabled this field shall be cleared to 0.
pub vf_id: u8,
/// Slot ID. The Slot ID field shall be updated by the xHC to reflect the slot associated with the
/// command that generated the event, with the following exceptions:
///
/// - The Slot ID shall be cleared to 0 for No Op, Set Latency Tolerance Value, Get Port Bandwidth,
/// and Force Event Commands.
///
/// - The Slot ID shall be set to the ID of the newly allocated Device Slot for the Enable Slot
/// Command.
///
/// - The value of Slot ID shall be vendor defined when generated by a vendor defined command.
///
/// This value is used as an index in the Device Context Base Address Array to select the Device
/// Context of the source device. If this Event is due to a Host Controller Command, then this field
/// shall be cleared to 0.
pub slot_id: u8,
}
#[bitfield(u128)]
pub struct PortStatusChangeEvent {
#[bits(24)]
__: u32,
pub port_id: u8,
__: u32,
#[bits(24)]
__: u32,
#[bits(8)]
pub completion_code: CommandCompletionCode,
#[bits(10)]
__: u16,
#[bits(6, default=TrbType::PortStatusChangeEvent)]
trb_type: TrbType,
__: u16,
}
pub enum EventTrb {
Transfer(TransferEvent),
CommandCompletion(CommandCompletionEvent),
PortStatusChange(PortStatusChangeEvent),
BandwidthRequest(TransferRequestBlock),
Doorbell(TransferRequestBlock),
HostController(TransferRequestBlock),
DeviceNotification(TransferRequestBlock),
MFINDEXWrap(TransferRequestBlock),
}
impl From<TransferRequestBlock> for EventTrb {
fn from(value: TransferRequestBlock) -> Self {
match value.trb_type() {
TrbType::TransferEvent => {
EventTrb::Transfer(TransferEvent::from_bits(value.into_bits()))
}
TrbType::CommandCompletionEvent => {
EventTrb::CommandCompletion(CommandCompletionEvent::from_bits(value.into_bits()))
}
TrbType::PortStatusChangeEvent => {
EventTrb::PortStatusChange(PortStatusChangeEvent::from_bits(value.into_bits()))
}
TrbType::BandwidthRequestEvent => EventTrb::BandwidthRequest(value),
TrbType::DoorbellEvent => EventTrb::Doorbell(value),
TrbType::HostControllerEvent => EventTrb::HostController(value),
TrbType::DeviceNotificationEvent => EventTrb::DeviceNotification(value),
TrbType::MFINDEXWrapEvent => EventTrb::MFINDEXWrap(value),
t => panic!("Unknown trb type on event ring: {:?}", t),
}
}
}
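
The From impl reinterprets the same 16 TRB bytes under a more specific bitfield layout via into_bits/from_bits, so classifying an event involves no copying or parsing. A sketch of how a consumer narrows to one event type:

fn command_completion(raw: TransferRequestBlock) -> Option<CommandCompletionEvent> {
    match EventTrb::from(raw) {
        EventTrb::CommandCompletion(event) => Some(event),
        // Every other event type is routed elsewhere by the driver.
        _ => None,
    }
}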


@@ -1,14 +1,18 @@
mod command_trb;
mod device_context;
mod endpoint_context;
mod event_ring_segment_table;
mod event_trb;
mod input_context;
mod slot_context;
mod trb;
mod trb_ring_segment;
pub use command_trb::*;
pub use device_context::*;
pub use endpoint_context::*;
pub use event_ring_segment_table::*;
pub use event_trb::*;
pub use input_context::*;
pub use slot_context::*;
pub use trb::*;


@@ -75,29 +75,6 @@
}
}
#[bitfield(u128)]
pub struct TrbNoOp {
__: u64,
#[bits(22)]
__: u32,
#[bits(10, default = 0)]
interrupter_target: u16,
cycle: bool,
evaluate_next: bool,
__: bool,
__: bool,
chain: bool,
#[bits(default = true)]
interrupt_on_completion: bool,
#[bits(4)]
__: u8,
#[bits(6, default = TrbType::NoOpCommand)]
trb_type: TrbType,
__: u16,
}
impl TypedTrb for TrbNoOp {}
#[bitfield(u128)]
pub struct TrbLink {
/// Ring Segment Pointer Hi and Lo. These fields represent the high order bits of the 64-bit base
@@ -141,139 +118,3 @@ pub struct TrbLink {
}
impl TypedTrb for TrbLink {}
#[bitfield(u128)]
pub struct TrbTransferEvent {
pub transfer_trb_pointer: u64,
#[bits(24)]
pub trb_transfer_lenght: u32,
/// Completion Code. This field encodes the completion status of the command that generated the
/// event. Refer to the respective command definition for a list of the possible Completion Codes
/// associated with the command. Refer to section 6.4.5 for an enumerated list of possible error
/// conditions.
pub completion_code: u8,
#[bits(10)]
__: u16,
/// TRB Type. This field identifies the type of the TRB. Refer to Table 6-91 for the definition of the
/// Command Completion Event TRB type ID
#[bits(6, default=TrbType::TransferEvent)]
pub trb_type: TrbType,
#[bits(5)]
pub endpoint_id: u8,
#[bits(3)]
__: u8,
pub slot_id: u8,
}
impl TypedTrb for TrbTransferEvent {}
#[bitenum]
#[repr(u8)]
pub enum CommandCompletionCode {
#[fallback]
#[allow(dead_code)]
Invalid = 0,
Success = 1,
}
#[bitfield(u128)]
pub struct TrbCommandCompletion {
/// Command TRB Pointer Hi and Lo. This field represents the high order bits of the 64-bit address
/// of the Command TRB that generated this event. Note that this field is not valid for some
/// Completion Code values. Refer to Table 6-90 for specific cases.
///
/// The memory structure referenced by this physical memory pointer shall be aligned on a 16-byte
/// address boundary.
pub command_trb_pointer: u64,
/// Command Completion Parameter. This field may optionally be set by a command. Refer to
/// section 4.6.6.1 for specific usage. If a command does not utilize this field it shall be treated as
/// RsvdZ.
#[bits(24)]
pub command_completion_parameter: u64,
/// Completion Code. This field encodes the completion status of the command that generated the
/// event. Refer to the respective command definition for a list of the possible Completion Codes
/// associated with the command. Refer to section 6.4.5 for an enumerated list of possible error
/// conditions.
pub completion_code: u8,
/// Cycle bit (C). This bit is used to mark the Dequeue Pointer of an Event Ring
pub cycle_bit: bool,
#[bits(9)]
__: u16,
/// TRB Type. This field identifies the type of the TRB. Refer to Table 6-91 for the definition of the
/// Command Completion Event TRB type ID
#[bits(6, default=TrbType::CommandCompletionEvent)]
pub trb_type: TrbType,
/// VF ID. The ID of the Virtual Function that generated the event. Note that this field is valid only if
/// Virtual Functions are enabled. If they are not enabled this field shall be cleared to 0.
pub vf_id: u8,
/// Slot ID. The Slot ID field shall be updated by the xHC to reflect the slot associated with the
/// command that generated the event, with the following exceptions:
///
/// - The Slot ID shall be cleared to 0 for No Op, Set Latency Tolerance Value, Get Port Bandwidth,
/// and Force Event Commands.
///
/// - The Slot ID shall be set to the ID of the newly allocated Device Slot for the Enable Slot
/// Command.
///
/// - The value of Slot ID shall be vendor defined when generated by a vendor defined command.
///
/// This value is used as an index in the Device Context Base Address Array to select the Device
/// Context of the source device. If this Event is due to a Host Controller Command, then this field
/// shall be cleared to 0.
pub slot_id: u8,
}
impl TypedTrb for TrbCommandCompletion {}
#[bitfield(u128)]
pub struct TrbPortStatusChangeEvent {
#[bits(24)]
__: u32,
pub port_id: u8,
__: u32,
#[bits(24)]
__: u32,
pub completion_code: u8,
#[bits(10)]
__: u16,
#[bits(6, default=TrbType::PortStatusChangeEvent)]
trb_type: TrbType,
__: u16,
}
impl TypedTrb for TrbPortStatusChangeEvent {}
#[bitfield(u128)]
pub struct TrbEnableSlotCommand {
__: u64,
__: u32,
#[bits(10)]
__: u16,
#[bits(6, default=TrbType::EnableSlotCommand)]
trb_type: TrbType,
#[bits(5)]
slot_type: u8,
#[bits(11)]
__: u16,
}
impl TypedTrb for TrbEnableSlotCommand {}
#[bitfield(u128)]
pub struct TrbAddressDeviceCommand {
pub input_context_pointer: u64,
__: u32,
#[bits(9)]
__: u16,
pub block_set_address_request: bool,
#[bits(6, default=TrbType::AddressDeviceCommand)]
trb_typ: TrbType,
__: u8,
pub slot_id: u8,
}
impl TypedTrb for TrbAddressDeviceCommand {}


@@ -0,0 +1,113 @@
use alloc::boxed::Box;
use mammoth::physical_box::PhysicalBox;
use crate::xhci::{
data_structures::{
DeviceContext, EndpointContextFields, EndpointState, EndpointType, InputContext,
TRDequeuePointer, TransferEvent,
},
registers::DoorbellPointer,
trb_ring::TrbRing,
};
struct DeviceContextBaseArray(PhysicalBox<[u64]>);
impl DeviceContextBaseArray {
pub fn new(max_slots: u8) -> Self {
Self(PhysicalBox::default_with_count(0, max_slots as usize + 1))
}
}
pub struct DeviceSlot {
device_context: PhysicalBox<DeviceContext>,
endpoint_0_transfer_ring: TrbRing<TransferEvent>,
doorbell: DoorbellPointer,
}
impl DeviceSlot {
fn new(doorbell: DoorbellPointer) -> Self {
Self {
device_context: PhysicalBox::new(DeviceContext::default()),
endpoint_0_transfer_ring: TrbRing::new(),
doorbell,
}
}
pub fn send_control_command(&mut self) {
self.doorbell.ring(1);
}
}
pub struct DeviceSlotManager {
device_context_base_array: DeviceContextBaseArray,
slots: Box<[Option<DeviceSlot>]>,
doorbells: Box<[Option<DoorbellPointer>]>,
}
impl DeviceSlotManager {
pub fn new(max_slots: u8, doorbells: Box<[DoorbellPointer]>) -> Self {
assert!(
doorbells.len() == max_slots as usize,
"Got an incorrect doorbell slice size."
);
Self {
device_context_base_array: DeviceContextBaseArray::new(max_slots),
slots: core::iter::repeat_with(|| None)
.take(max_slots as usize)
.collect(),
doorbells: doorbells.into_iter().map(Some).collect(),
}
}
pub fn device_context_base_array_physical_address(&self) -> usize {
self.device_context_base_array.0.physical_address()
}
/// Prepares a slot and an input context for an address device command.
///
/// Follows section 4.6.5 of the XHCI spec.
pub fn prep_slot_for_address_device(
&mut self,
slot_id: u8,
port_number: u8,
) -> PhysicalBox<InputContext> {
// TODO: Ensure alignment
let device_slot = DeviceSlot::new(
self.doorbells[(slot_id - 1) as usize]
.take()
.expect("Slot already allocated."),
);
let mut input_context = PhysicalBox::new(InputContext::default());
// The Add Context flags for the Slot Context and the Endpoint 0 Context shall be set to 1.
input_context.input_control_context.add_context_flags = 0x3;
// See XHCI 4.5.2 for information
input_context.slot_context.fields = input_context
.slot_context
.fields
.with_root_hub_port_number(port_number)
.with_route_string(0)
.with_context_entries(1)
.with_interrupter_target(0);
// The Endpoint 0 Context data structure in the
// Input Context shall define valid values for the TR Dequeue Pointer, EP Type, Error
// Count (CErr), and Max Packet Size fields. The MaxPStreams, Max Burst Size, and
// EP State values shall be cleared to '0'
input_context.endpoint_context_0.tr_deque_pointer = TRDequeuePointer::new()
.with_pointer(device_slot.endpoint_0_transfer_ring.physical_base_address() as u64)
.with_dequeue_cycle_state(true);
input_context.endpoint_context_0.fields = EndpointContextFields::new()
.with_endpoint_type(EndpointType::Control)
.with_max_primary_streams(0)
.with_max_burst_size(0)
.with_endpoint_state(EndpointState::Disabled);
self.device_context_base_array.0[slot_id as usize] =
device_slot.device_context.physical_address() as u64;
self.slots[slot_id as usize - 1] = Some(device_slot);
input_context
}
}
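
One detail worth spelling out: the base array has max_slots + 1 entries because it is indexed by 1-based slot ID, with entry 0 reserved for the scratchpad buffer array pointer (which this driver asserts away). A worked illustration, assuming the IndexMut impl PhysicalBox gained earlier in this commit:

fn install_device_context(dcbaa: &mut PhysicalBox<[u64]>, slot_id: u8, context_paddr: u64) {
    // Slot IDs start at 1; index 0 is the scratchpad entry.
    assert!(slot_id >= 1, "Slot 0 is reserved.");
    dcbaa[slot_id as usize] = context_paddr;
}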


@@ -0,0 +1,282 @@
use alloc::sync::Arc;
use mammoth::sync::Mutex;
use mammoth::task::Spawner;
use mammoth::task::Task;
use super::registers::{self};
use crate::xhci::data_structures::AddressDeviceCommand;
use crate::xhci::data_structures::CommandCompletionCode;
use crate::xhci::data_structures::CommandCompletionEvent;
use crate::xhci::data_structures::CommandTrb;
use crate::xhci::data_structures::EnableSlotCommand;
use crate::xhci::data_structures::EventTrb;
use crate::xhci::data_structures::NoOpCommand;
use crate::xhci::data_structures::PortStatusChangeEvent;
use crate::xhci::device_context_base_array::DeviceSlotManager;
use crate::xhci::interrupter::Interrupter;
use crate::xhci::registers::DoorbellPointer;
use crate::xhci::registers::HostControllerOperationalWrapper;
use crate::xhci::registers::InterrupterRegisterSet;
use crate::xhci::registers::PortStatusAndControl;
use crate::xhci::trb_ring::CommandRing;
pub struct XHCIDriver {
#[allow(dead_code)]
pci_device: pci::PciDevice,
capabilities: registers::HostControllerCapabilities,
operational: HostControllerOperationalWrapper,
command_ring: Mutex<CommandRing>,
// TODO: Add multiple interrupters.
interrupter: Mutex<Interrupter>,
device_slot_manager: Mutex<DeviceSlotManager>,
}
impl XHCIDriver {
pub fn from_pci_device(mut pci_device: pci::PciDevice) -> Self {
let address =
((pci_device.header().bars[1] as usize) << 32) | (pci_device.header().bars[0] as usize);
let irq_port_cap = pci_device.register_msi().unwrap();
let (operational, capabilities) = HostControllerOperationalWrapper::new(address);
let max_slots = capabilities.params_1.max_device_slots();
let doorbell_physical = address + capabilities.doorbell_offset as usize;
let (command_doorbell, slot_doorbells) =
DoorbellPointer::create_command_and_slots(doorbell_physical, max_slots);
// Offset to skip the mfindex register.
let interrupter_registers = mammoth::mem::map_direct_physical_and_leak(
address + capabilities.runtime_register_space_offset as usize,
size_of::<InterrupterRegisterSet>() * 2,
);
let interrupter_registers = unsafe { interrupter_registers.add(1) };
let mut driver = Self {
pci_device,
capabilities,
operational,
command_ring: Mutex::new(CommandRing::new(command_doorbell)),
interrupter: Mutex::new(Interrupter::new(interrupter_registers, irq_port_cap)),
device_slot_manager: Mutex::new(DeviceSlotManager::new(max_slots, slot_doorbells)),
};
driver.initialize();
driver
}
fn initialize(&mut self) {
#[cfg(feature = "debug")]
mammoth::debug!("Stopping XHCI Controller.");
// Stop the host controller.
self.operational
.update_command(|cmd| cmd.with_run_stop(false));
#[cfg(feature = "debug")]
mammoth::debug!("Waiting for controller to halt.");
// Sleep until the controller is halted.
let mut status = self.operational.read_status();
while !status.host_controller_halted() {
// TODO: Sleep for how long?
mammoth::syscall::thread_sleep(50).unwrap();
status = self.operational.read_status();
}
#[cfg(feature = "debug")]
mammoth::debug!("Resetting Controller.");
self.operational
.update_command(|cmd| cmd.with_host_controller_reset(true));
let mut command: registers::UsbCommand = self.operational.read_command();
while command.host_controller_reset() {
// TODO: Sleep for how long?
mammoth::syscall::thread_sleep(50).unwrap();
command = self.operational.read_command();
}
#[cfg(feature = "debug")]
mammoth::debug!("XHCI Controller Reset, waiting ready.");
let mut status = self.operational.read_status();
while status.controller_not_ready() {
// TODO: Sleep for how long?
mammoth::syscall::thread_sleep(50).unwrap();
status = self.operational.read_status();
}
#[cfg(feature = "debug")]
mammoth::debug!("XHCI Controller Ready.");
#[cfg(feature = "debug")]
mammoth::debug!("Setting Command Ring");
self.operational.set_command_ring_dequeue_pointer(
self.command_ring.lock().trb_ring.physical_base_address(),
true,
);
#[cfg(feature = "debug")]
mammoth::debug!("Setting DCBA.");
self.operational
.set_device_context_base_address_array_pointer(
self.device_slot_manager
.lock()
.device_context_base_array_physical_address(),
);
// We tell the controller that we support as many slots as it reports because
// we allocate a full 4K page to the DCBA, which holds 256 entries, while the
// maximum slot count is 255.
self.operational.update_configure(|cfg| {
cfg.with_max_device_slots_enabled(self.capabilities.params_1.max_device_slots())
});
assert!(
self.capabilities.params_2.max_scratchpad_buffers() == 0,
"Unsupported scratchpad buffers."
);
#[cfg(feature = "debug")]
mammoth::debug!("Resetting event ring.");
// SAFETY: The HC is stopped.
unsafe { self.interrupter.lock().reset() };
self.operational
.update_command(|cmd| cmd.with_run_stop(true).with_interrupter_enable(true));
#[cfg(feature = "debug")]
mammoth::debug!("Enabled interrupts and controller.");
}
pub fn interrupt_loop(self: Arc<Self>, spawner: Spawner) {
let completion_handler = |trb| {
self.clone().handle_completion(spawner.clone(), trb);
};
self.interrupter.lock().interrupt_loop(completion_handler);
}
fn handle_completion(self: Arc<XHCIDriver>, spawner: Spawner, trb: EventTrb) {
match trb {
EventTrb::Transfer(trb) => {
mammoth::debug!("Unhandled transfer event: {:?}", trb);
}
EventTrb::CommandCompletion(trb) => {
self.command_ring
.lock()
.trb_ring
.handle_completion(trb, trb.command_trb_pointer() as usize);
}
EventTrb::PortStatusChange(trb) => {
let self_clone = self.clone();
spawner.spawn(Task::new(async move {
self_clone.port_status_change(trb).await
}));
}
EventTrb::BandwidthRequest(trb) => {
mammoth::debug!("Unhandled bandwidth request event: {:?}", trb);
}
EventTrb::Doorbell(trb) => {
mammoth::debug!("Unhandled doorbell event: {:?}", trb);
}
EventTrb::HostController(trb) => {
mammoth::debug!("Unhandled host controller event: {:?}", trb);
}
EventTrb::DeviceNotification(trb) => {
mammoth::debug!("Unhandled device notification event: {:?}", trb);
}
EventTrb::MFINDEXWrap(trb) => {
mammoth::debug!("Unhandled MFINDEX wrap event: {:?}", trb);
}
}
}
async fn send_command(&self, trb: CommandTrb) -> CommandCompletionEvent {
// Split the future and the await so the lock is dropped before we await.
let future = { self.command_ring.lock().enqueue_command(trb) };
future.await
}
pub async fn startup(&self) {
#[cfg(feature = "debug")]
mammoth::debug!("Sending no op command.");
let result = self.send_command(NoOpCommand::new().into()).await;
assert_eq!(result.completion_code(), CommandCompletionCode::Success);
#[cfg(feature = "debug")]
mammoth::debug!("Successfully tested no op command.");
#[cfg(feature = "debug")]
mammoth::debug!("Resetting all connected ports.");
for port_index in 0..self.operational.num_ports() {
self.operational
.update_port_status(port_index, |p| p.clear_change_bits());
}
for port_index in 0..self.operational.num_ports() {
let status = self.operational.get_port(port_index).status_and_control;
if status.port_power() && status.current_connect_status() {
mammoth::debug!("Resetting port {}", port_index);
self.operational.update_port_status(port_index, |_| {
PortStatusAndControl::new()
.with_port_reset(true)
.with_port_power(true)
});
}
}
}
async fn port_status_change(self: Arc<Self>, status_change: PortStatusChangeEvent) {
// Ports are indexed from 1.
let port_id = status_change.port_id();
let port_index = (port_id - 1) as usize;
let port_status = self.operational.get_port(port_index).status_and_control;
#[cfg(feature = "debug")]
mammoth::debug!("Port status change for port {}", port_id);
if !port_status.port_reset_change() {
mammoth::debug!(
"Unknown port status event, not handling. status= {:?}",
port_status
);
return;
}
self.operational
.update_port_status(port_index, |s| s.clear_change_bits());
#[cfg(feature = "debug")]
mammoth::debug!("Enabling slot.");
let resp = self.send_command(EnableSlotCommand::new().into()).await;
assert_eq!(resp.completion_code(), CommandCompletionCode::Success);
let slot = resp.slot_id();
#[cfg(feature = "debug")]
mammoth::debug!("Creating slot data structures in slot {}.", slot);
let input_context = self
.device_slot_manager
.lock()
.prep_slot_for_address_device(slot, port_id);
#[cfg(feature = "debug")]
mammoth::debug!("Sending address device.");
let resp = self
.send_command(
AddressDeviceCommand::new()
.with_slot_id(slot)
.with_input_context_pointer(input_context.physical_address() as u64)
.into(),
)
.await;
assert_eq!(resp.completion_code(), CommandCompletionCode::Success);
}
}
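
The seam between the async half and the interrupt half deserves emphasis: enqueue under the command-ring lock, release the lock, then await; the interrupt thread later takes the same lock and wakes the future from handle_completion. A condensed sketch of one round trip using the names above:

async fn no_op_round_trip(driver: &XHCIDriver) {
    // The inner scope ends before the await, so the interrupt thread can
    // take the ring lock to deliver the completion event.
    let future = { driver.command_ring.lock().enqueue_command(NoOpCommand::new().into()) };
    let completion = future.await;
    assert_eq!(completion.completion_code(), CommandCompletionCode::Success);
}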


@@ -0,0 +1,70 @@
use alloc::vec::Vec;
use crate::xhci::{
data_structures::{EventRingSegmentTable, EventTrb, TransferRequestBlock, TrbRingSegment},
trb_ring::TrbPointer,
};
pub struct EventRing {
segment_table: EventRingSegmentTable,
segments: Vec<TrbRingSegment>,
cycle_bit: bool,
trb_pointer: TrbPointer,
}
impl EventRing {
pub fn new() -> Self {
// Software maintains an Event Ring Consumer Cycle State (CCS) bit, initializing it
// to 1...
let cycle_bit = true;
let mut event_ring = Self {
segment_table: EventRingSegmentTable::new(1),
segments: [TrbRingSegment::new(100)].into(),
cycle_bit,
trb_pointer: TrbPointer::default(),
};
event_ring.segment_table[0].update_from_trb_ring(&event_ring.segments[0]);
event_ring
}
pub fn segment_table(&self) -> &EventRingSegmentTable {
&self.segment_table
}
pub fn erdp_physical_address(&self) -> usize {
self.segments[self.trb_pointer.segment_index].physical_address()
+ self.trb_pointer.segment_physical_offset()
}
fn current_trb(&self) -> TransferRequestBlock {
// TODO: These should be volatile reads.
self.segments[self.trb_pointer.segment_index][self.trb_pointer.segment_offset]
}
fn increment_pointer(&mut self) {
self.trb_pointer.segment_offset += 1;
if self.trb_pointer.segment_offset == self.segments[self.trb_pointer.segment_index].len() {
self.trb_pointer.segment_index += 1;
self.trb_pointer.segment_offset = 0;
if self.trb_pointer.segment_index == self.segments.len() {
// Wrap around to front.
self.trb_pointer.segment_index = 0;
self.cycle_bit = !self.cycle_bit;
}
}
}
pub fn get_next(&mut self) -> Option<EventTrb> {
let curr = self.current_trb();
if curr.cycle() != self.cycle_bit {
None
} else {
self.increment_pointer();
Some(curr.into())
}
}
}
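
The ownership handshake lives entirely in the cycle bits: the controller stamps each event with its producer cycle state, and the consumer owns a TRB only while the TRB's cycle bit matches the consumer cycle state (CCS), which flips on every wrap. A toy model of just that protocol; this is illustrative, not driver code:

use alloc::vec::Vec;

/// Drain (cycle_bit, payload) entries from a non-empty ring snapshot until
/// hitting one that has not been produced on the current lap.
fn drain_model(ring: &[(bool, u32)], mut index: usize, mut ccs: bool) -> Vec<u32> {
    let mut consumed = Vec::new();
    loop {
        let (cycle, payload) = ring[index];
        if cycle != ccs {
            return consumed; // stale entry from the previous lap
        }
        consumed.push(payload);
        index += 1;
        if index == ring.len() {
            index = 0;
            ccs = !ccs; // wrapping flips what "produced" looks like
        }
    }
}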


@@ -0,0 +1,60 @@
use core::ptr::NonNull;
use mammoth::cap::Capability;
use crate::xhci::{
data_structures::{EventTrb, TransferRequestBlock},
event_ring::EventRing,
registers::{InterrupterModeration, InterrupterRegisterSet, InterrupterRegisters},
};
pub struct Interrupter {
event_ring: EventRing,
register_set: InterrupterRegisters,
irq_port_cap: Capability,
}
impl Interrupter {
pub fn new(
interrupter_register_set: NonNull<InterrupterRegisterSet>,
irq_port_cap: Capability,
) -> Self {
Self {
event_ring: EventRing::new(),
register_set: InterrupterRegisters::new(interrupter_register_set),
irq_port_cap,
}
}
// SAFETY:
// - HC must be halted for interrupter 0.
pub unsafe fn reset(&mut self) {
// SAFETY:
// - The segment table is size 1.
unsafe {
self.register_set.set_event_ring(
self.event_ring.segment_table(),
self.event_ring.erdp_physical_address(),
);
}
self.register_set.set_moderation(
InterrupterModeration::new()
.with_interrupt_moderation_interval(4000)
.with_interrupt_moderation_counter(0),
);
self.register_set.enable_interrupts();
}
pub fn interrupt_loop(&mut self, completion_handler: impl Fn(EventTrb)) {
loop {
let _ = mammoth::syscall::port_recv(&self.irq_port_cap, &mut [], &mut []).unwrap();
while let Some(trb) = self.event_ring.get_next() {
completion_handler(trb);
}
self.register_set
.update_dequeue_pointer_clearing_busy(self.event_ring.erdp_physical_address());
}
}
}


@@ -1,2 +1,7 @@
pub mod data_structures;
pub mod registers;
mod data_structures;
mod device_context_base_array;
pub mod driver;
mod event_ring;
mod interrupter;
mod registers;
mod trb_ring;


@@ -0,0 +1,188 @@
use core::task::{Poll, Waker};
use alloc::{collections::vec_deque::VecDeque, sync::Arc, vec::Vec};
use mammoth::sync::Mutex;
use crate::xhci::{
data_structures::{
CommandCompletionEvent, CommandTrb, TransferRequestBlock, TrbLink, TrbRingSegment, TypedTrb,
},
registers::DoorbellPointer,
};
struct TrbFutureState<T> {
/// Physical Address for the enqueued TRB.
/// Used for sanity checking.
physical_address: usize,
waker: Option<Waker>,
response: Option<T>,
}
#[derive(Clone)]
pub struct TrbFuture<T> {
state: Arc<Mutex<TrbFutureState<T>>>,
}
impl<T> TrbFuture<T> {
fn new(paddr: usize) -> Self {
Self {
state: Arc::new(Mutex::new(TrbFutureState {
physical_address: paddr,
waker: None,
response: None,
})),
}
}
}
impl<T: Copy> Future for TrbFuture<T> {
type Output = T;
fn poll(
self: core::pin::Pin<&mut Self>,
cx: &mut core::task::Context<'_>,
) -> core::task::Poll<Self::Output> {
let mut state = self.state.lock();
match state.response {
Some(trb) => Poll::Ready(trb),
None => {
state.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
}
#[derive(Default, Copy, Clone, Debug)]
pub struct TrbPointer {
/// Index into the vector of trb segments.
pub segment_index: usize,
/// Index into the specific segment.
/// This is a TransferRequestBlock index;
/// to get the physical offset, use segment_physical_offset().
pub segment_offset: usize,
}
impl TrbPointer {
pub fn segment_physical_offset(&self) -> usize {
self.segment_offset * size_of::<TransferRequestBlock>()
}
}
pub struct TrbRing<T> {
segments: Vec<TrbRingSegment>,
enqueue_pointer: TrbPointer,
cycle_bit: bool,
pending_futures: VecDeque<TrbFuture<T>>,
}
impl<T: Clone> TrbRing<T> {
pub fn new() -> Self {
Self {
// TODO: What size and count should these be?
segments: alloc::vec![TrbRingSegment::new(100)],
enqueue_pointer: TrbPointer::default(),
// Start with this as true so we are flipping bits from 0 (default) to 1
// to mark the enqueue pointer.
cycle_bit: true,
pending_futures: VecDeque::new(),
}
}
pub fn physical_base_address(&self) -> usize {
self.segments[0].physical_address()
}
fn physical_address_of_enqueue_pointer(&self) -> usize {
self.segments[self.enqueue_pointer.segment_index].physical_address()
+ self.enqueue_pointer.segment_physical_offset()
}
pub fn enqueue_trb(&mut self, trb: TransferRequestBlock) -> TrbFuture<T> {
let paddr = self.physical_address_of_enqueue_pointer();
*self.next_trb_ref() = trb.with_cycle(self.cycle_bit);
self.advance_enqueue_pointer();
let future = TrbFuture::new(paddr);
self.pending_futures.push_back(future.clone());
future
}
fn next_trb_ref(&mut self) -> &mut TransferRequestBlock {
&mut self.segments[self.enqueue_pointer.segment_index][self.enqueue_pointer.segment_offset]
}
fn advance_enqueue_pointer(&mut self) {
self.enqueue_pointer.segment_offset += 1;
if self.enqueue_pointer.segment_offset
== self.segments[self.enqueue_pointer.segment_index].len() - 1
{
// We have reached the end of the segment, insert a link trb.
// Increment the segment index with wrapping.
let next_segment_index =
if self.enqueue_pointer.segment_index + 1 == self.segments.len() {
0
} else {
self.enqueue_pointer.segment_index + 1
};
let next_segment_pointer = self.segments[next_segment_index].physical_address();
let toggle_cycle = next_segment_index == 0;
*self.next_trb_ref() = TrbLink::new()
.with_ring_segment_pointer(next_segment_pointer as u64)
.with_cycle(self.cycle_bit)
.with_toggle_cycle(toggle_cycle)
.to_trb();
// Flip toggle cycle bit if necessary.
self.cycle_bit ^= toggle_cycle;
self.enqueue_pointer = TrbPointer {
segment_index: next_segment_index,
segment_offset: 0,
};
}
}
pub fn handle_completion(&mut self, completion_trb: T, physical_address: usize) {
let completion = self.pending_futures.pop_front().unwrap();
let mut completion = completion.state.lock();
// TODO: Handle recovery scenarios here.
assert!(
completion.physical_address == physical_address,
"Got an unexpected command completion. Expected: {:0x}, Got: {:0x}",
completion.physical_address,
physical_address
);
completion.response = Some(completion_trb);
if let Some(waker) = &completion.waker {
waker.wake_by_ref();
}
}
}
pub struct CommandRing {
pub trb_ring: TrbRing<CommandCompletionEvent>,
doorbell: DoorbellPointer,
}
impl CommandRing {
pub fn new(doorbell: DoorbellPointer) -> Self {
Self {
trb_ring: TrbRing::new(),
doorbell,
}
}
// We have to explicitly return a future here so the caller can drop the
// ring lock before awaiting it (see send_command in driver.rs).
pub fn enqueue_command(&mut self, command: CommandTrb) -> TrbFuture<CommandCompletionEvent> {
let fut = self.trb_ring.enqueue_trb(command.into());
// Command Doorbell is always 0.
self.doorbell.ring(0);
fut
}
}
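
Note the len() - 1 in advance_enqueue_pointer: the last slot of every segment is reserved for its link TRB, and toggle_cycle is set only on the link that wraps back to segment 0, so the producer cycle bit flips exactly once per full lap. A tiny helper just to make the capacity arithmetic concrete:

/// Usable TRB slots in a ring: one slot per segment is spent on its link TRB.
fn usable_trbs(segment_lens: &[usize]) -> usize {
    segment_lens.iter().map(|len| len - 1).sum()
}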


@@ -18,7 +18,7 @@ if [[ $1 == "debug" ]]; then
fi
# Use machine q35 to access PCI devices.
qemu-system-x86_64 -machine q35 -d guest_errors -m 1G -serial stdio -hda ${BUILD_DIR}/disk.img ${QEMU_ARGS} -device nec-usb-xhci,id=xhci -device usb-kbd,bus=xhci.0
~/.local/bin/qemu-system-x86_64 -machine q35 -d guest_errors -m 1G -serial stdio -hda ${BUILD_DIR}/disk.img ${QEMU_ARGS} -device nec-usb-xhci,id=xhci -device usb-kbd,bus=xhci.0
popd
# Extra options to add to this script in the future.


@@ -18,6 +18,8 @@ void DriverManager::WriteMessage(uint64_t irq_num, IpcMessage&& message) {
return;
}
dbgln("IRQ offset {x}", offset);
driver_list_[offset]->Send(glcr::Move(message));
}