Rust XHCI Implementation.

Drew 2025-12-05 22:01:13 -08:00
parent 0b95098748
commit 1b2ca34dd5
23 changed files with 707 additions and 110 deletions

rust/Cargo.lock (generated)

@ -2,12 +2,6 @@
# It is not intended for manual editing.
version = 4
[[package]]
name = "autocfg"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
[[package]]
name = "bitfield-struct"
version = "0.8.0"
@ -80,11 +74,10 @@ dependencies = [
[[package]]
name = "lock_api"
version = "0.4.12"
version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965"
dependencies = [
"autocfg",
"scopeguard",
]
@ -105,9 +98,9 @@ dependencies = [
[[package]]
name = "prettyplease"
version = "0.2.20"
version = "0.2.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e"
checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b"
dependencies = [
"proc-macro2",
"syn",
@ -115,18 +108,18 @@ dependencies = [
[[package]]
name = "proc-macro2"
version = "1.0.86"
version = "1.0.103"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.36"
version = "1.0.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7"
checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f"
dependencies = [
"proc-macro2",
]
@ -148,9 +141,9 @@ dependencies = [
[[package]]
name = "syn"
version = "2.0.72"
version = "2.0.111"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af"
checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87"
dependencies = [
"proc-macro2",
"quote",
@ -178,15 +171,15 @@ dependencies = [
[[package]]
name = "unicode-ident"
version = "1.0.12"
version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5"
[[package]]
name = "unicode-segmentation"
version = "1.11.0"
version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202"
checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493"
[[package]]
name = "victoriafalls"
@ -200,12 +193,21 @@ dependencies = [
"yunqc",
]
[[package]]
name = "volatile"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af8ca9a5d4debca0633e697c88269395493cebf2e10db21ca2dbde37c1356452"
[[package]]
name = "voyageurs"
version = "0.1.0"
dependencies = [
"bitfield-struct 0.12.1",
"mammoth",
"pci",
"volatile",
"yellowstone-yunq",
]
[[package]]


@ -78,6 +78,10 @@ impl MemoryRegion {
})
}
pub fn vaddr(&self) -> usize {
self.virt_addr as usize
}
pub fn slice<T>(&self) -> &[T] {
unsafe {
slice::from_raw_parts(
@ -246,11 +250,11 @@ pub fn map_cap_and_leak(mem_cap: Capability) -> u64 {
vaddr
}
pub fn map_direct_physical_and_leak(paddr: u64, size: u64) -> u64 {
pub fn map_direct_physical_and_leak<T>(paddr: u64, size: u64) -> *mut T {
let mem_cap = syscall::memory_object_direct_physical(paddr, size).unwrap();
let vaddr = syscall::address_space_map(&mem_cap).unwrap();
mem_cap.release();
vaddr
vaddr as *mut T
}
pub fn map_physical_and_leak(size: u64) -> (u64, u64) {
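With the signature now generic, callers name the pointee type at the call site instead of juggling raw addresses. A minimal usage sketch, assuming a bar_address already computed from the device's BARs (the xHCI driver later in this commit uses exactly this pattern):

// Request a typed pointer to the capability registers; T is inferred from the
// annotation, so no intermediate u64 cast is needed.
let caps: *mut HostControllerCapabilities = map_direct_physical_and_leak(bar_address, 0x1000);
let capabilities = unsafe { caps.read_volatile() };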


@ -54,6 +54,12 @@ impl<T> PhysicalBox<[T]> {
let (memory_region, paddr) =
MemoryRegion::contiguous_physical(layout.size() as u64).expect("Failed to allocate");
crate::debug!(
"Physical box allocated: v {:0x} p {:0x}",
memory_region.vaddr(),
paddr
);
let ptr: *mut T = memory_region.mut_ptr_at_offset(0);
for i in 0..len {
unsafe {


@ -72,15 +72,15 @@ impl PciDevice {
control.capable_address_64(),
"We don't handle the non-64bit case for MSI yet."
);
assert!(
control.multi_message_capable() == 0,
"We don't yet handle multi-message capable devices."
);
if control.multi_message_capable() != 0 {
mammoth::debug!("WARN: We don't yet handle multi-message capable devices.");
}
// FIXME: These probably need to be volatile writes.
let header: &mut PciDeviceHeader = self.memory_region.as_mut();
header.command = header.command.with_interrupt_disable(true);
msi_cap.msi_control = control.with_msi_enable(true);
msi_cap.msi_control = control.with_msi_enable(true).with_multi_message_enable(0);
// For setting the addr and data fields, see the Intel SDM,
// Vol. 3, Section 11.11.
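For reference, a hedged sketch of what those writes encode per the Intel SDM (Vol. 3, Section 11.11); apic_id and vector are illustrative placeholders, not values from this codebase:

// Message Address (low 32 bits): 0xFEE in bits 31:20, destination APIC ID in
// bits 19:12; redirection hint and destination mode left as 0.
let msi_address: u64 = 0xFEE0_0000 | ((apic_id as u64) << 12);
// Message Data: vector in bits 7:0; delivery mode (bits 10:8) = 0 for Fixed,
// trigger mode (bit 15) = 0 for edge.
let msi_data: u32 = vector as u32;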


@ -1,3 +1,5 @@
use core::ffi::c_void;
use alloc::sync::Arc;
use mammoth::{
cap::Capability,
@ -27,7 +29,8 @@ impl AhciController {
let pci_device = PciDevice::from_cap(pci_memory).unwrap();
let hba_vaddr =
mem::map_direct_physical_and_leak(pci_device.header().bars[5] as u64, 0x1100);
mem::map_direct_physical_and_leak(pci_device.header().bars[5] as u64, 0x1100)
as *mut c_void as u64;
let hba = unsafe { (hba_vaddr as *mut AhciHba).as_mut().unwrap() };
let mut controller = Self {
pci_device: Mutex::new(pci_device),


@ -6,3 +6,10 @@ edition = "2024"
[dependencies]
bitfield-struct = "0.12"
mammoth = { path = "../../lib/mammoth/" }
pci = { path = "../../lib/pci" }
volatile = "0.6.1"
yellowstone-yunq = { version = "0.1.0", path = "../../lib/yellowstone" }
[features]
default = ["debug"]
debug = []


@ -5,12 +5,28 @@ extern crate alloc;
mod xhci;
use mammoth::{debug, define_entry, zion::z_err_t};
use mammoth::{cap::Capability, debug, define_entry, zion::z_err_t};
use pci::PciDevice;
use xhci::driver::XHCIDriver;
define_entry!();
#[unsafe(no_mangle)]
extern "C" fn main() -> z_err_t {
debug!("In Voyageurs");
#[cfg(feature = "debug")]
debug!("Voyageurs Starting.");
let yellowstone = yellowstone_yunq::from_init_endpoint();
let xhci_info = yellowstone
.get_xhci_info()
.expect("Failed to get XHCI info from yellowstone.");
let pci_device = PciDevice::from_cap(Capability::take(xhci_info.xhci_region)).unwrap();
let xhci_driver = XHCIDriver::from_pci_device(pci_device);
loop {}
0
}


@ -1,9 +1,8 @@
use core::ops::{Index, IndexMut};
use alloc::{boxed::Box, vec};
use mammoth::physical_box::PhysicalBox;
use crate::xhci::data_structures::TrbRing;
use crate::xhci::data_structures::TrbRingSegment;
#[repr(align(64))]
#[derive(Default, Clone)]
@ -19,10 +18,16 @@ pub struct EventRingSegmentTableEntry {
}
impl EventRingSegmentTableEntry {
pub fn from_trb_fing(&mut self, trb_ring: &TrbRing) {
pub fn from_trb_ring(&mut self, trb_ring: &TrbRingSegment) {
mammoth::debug!("RSTE: {:0x}", self as *const _ as usize);
self.ring_segment_base_address = trb_ring.physical_address() as u64;
assert!(self.ring_segment_base_address % 64 == 0);
self.ring_segment_size = trb_ring.len() as u64;
unsafe {
core::ptr::write_volatile(
&mut self.ring_segment_size as *mut u64,
trb_ring.len() as u64,
)
};
assert!(self.ring_segment_size >= 16);
assert!(self.ring_segment_size <= 4096);
}


@ -1,8 +1,10 @@
mod endpoint_context;
mod event_ring_segment_table;
mod slot_context;
mod trb_ring;
mod trb;
mod trb_ring_segment;
pub use event_ring_segment_table::*;
pub use slot_context::*;
pub use trb_ring::*;
pub use trb::*;
pub use trb_ring_segment::*;


@ -0,0 +1,115 @@
use bitfield_struct::{bitenum, bitfield};
#[bitenum]
#[repr(u8)]
#[derive(Debug)]
pub enum TrbType {
#[fallback]
Reserved = 0,
Link = 6,
NoOp = 8,
}
#[bitfield(u128)]
pub struct TransferRequestBlock {
parameter: u64,
status: u32,
pub cycle: bool,
evaluate_next: bool,
flag_2: bool,
flag_3: bool,
flag_4: bool,
flag_5: bool,
flag_6: bool,
flag_7: bool,
flag_8: bool,
flag_9: bool,
#[bits(6)]
trb_type: u8,
control: u16,
}
impl TransferRequestBlock {}
pub trait TypedTrb {
fn from_trb(trb: TransferRequestBlock) -> Self
where
Self: From<u128>,
{
trb.into_bits().into()
}
fn to_trb(self) -> TransferRequestBlock
where
Self: Into<u128>,
{
Into::<u128>::into(self).into()
}
}
#[bitfield(u128)]
pub struct TrbNoOp {
__: u64,
#[bits(22)]
__: u32,
#[bits(10, default = 0)]
interrupter_target: u16,
cycle: bool,
evaluate_next: bool,
__: bool,
__: bool,
chain: bool,
#[bits(default = true)]
interrupt_on_completion: bool,
#[bits(4)]
__: u8,
#[bits(6, default = TrbType::NoOp)]
trb_type: TrbType,
__: u16,
}
impl TypedTrb for TrbNoOp {}
#[bitfield(u128)]
pub struct TrbLink {
/// Ring Segment Pointer Hi and Lo. Together these fields form the 64-bit base
/// address of the next Ring Segment.
/// The memory structure referenced by this physical memory pointer shall begin on a 16-byte
/// address boundary.
pub ring_segment_pointer: u64,
#[bits(22)]
__: u32,
/// Interrupter Target. This field defines the index of the Interrupter that will receive Transfer
/// Events generated by this TRB. Valid values are between 0 and MaxIntrs-1.
/// This field is ignored by the xHC on Command Rings.
#[bits(10)]
pub interrupter_target: u16,
/// Cycle bit (C). This bit is used to mark the Enqueue Pointer location of a Transfer or Command
/// Ring.
pub cycle: bool,
/// Toggle Cycle (TC). When set to 1, the xHC shall toggle its interpretation of the Cycle bit. When
/// cleared to 0, the xHC shall continue to the next segment using its current interpretation of the
/// Cycle bit.
pub toggle_cycle: bool,
__: bool,
__: bool,
/// Chain bit (CH). Set to 1 by software to associate this TRB with the next TRB on the Ring. A
/// Transfer Descriptor (TD) is defined as one or more TRBs. The Chain bit is used to identify the
/// TRBs that comprise a TD. Refer to section 4.11.7 for more information on Link TRB placement
/// within a TD. On a Command Ring this bit is ignored by the xHC.
#[bits(default = true)]
chain: bool,
/// Interrupt On Completion (IOC). If this bit is set to 1, it specifies that when this TRB completes,
/// the Host Controller shall notify the system of the completion by placing an Event TRB on the
/// Event ring and sending an interrupt at the next interrupt threshold.
pub interrupt_on_completion: bool,
#[bits(4)]
__: u8,
/// TRB Type. This field is set to Link TRB type. Refer to Table 6-91 for the definition of the Type
/// TRB IDs.
#[bits(6, default = TrbType::Link)]
trb_type: TrbType,
__: u16,
}
impl TypedTrb for TrbLink {}
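Taken together, TypedTrb gives each typed TRB a lossless round trip through the generic 128-bit layout. A minimal sketch (the enqueue call matches the driver code later in this commit):

// Defaults from the bitfield attributes apply here: trb_type = TrbType::NoOp
// and interrupt_on_completion = true.
let trb: TransferRequestBlock = TrbNoOp::new().to_trb();
// The ring stamps the producer cycle bit at enqueue time (see TrbRing below).
command_ring.enqueue_trb(trb);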


@ -1,113 +0,0 @@
use core::{
ops::{Index, IndexMut},
slice::SliceIndex,
};
use bitfield_struct::{bitenum, bitfield};
use mammoth::physical_box::PhysicalBox;
#[bitenum]
#[repr(u8)]
#[derive(Debug)]
pub enum TrbType {
#[fallback]
Reserved = 0,
NoOp = 8,
}
#[bitfield(u128)]
pub struct TransferRequestBlock {
parameter: u64,
status: u32,
cycle: bool,
evaluate_next: bool,
flag_2: bool,
flag_3: bool,
flag_4: bool,
flag_5: bool,
flag_6: bool,
flag_7: bool,
flag_8: bool,
flag_9: bool,
#[bits(6)]
trb_type: u8,
control: u16,
}
impl TransferRequestBlock {}
trait TypedTrb {
fn from_trb(trb: TransferRequestBlock) -> Self
where
Self: From<u128>,
{
trb.into_bits().into()
}
fn to_trb(self) -> TransferRequestBlock
where
Self: Into<u128>,
{
Into::<u128>::into(self).into()
}
}
#[bitfield(u128)]
pub struct TrbNoOp {
__: u64,
__: u32,
cycle: bool,
evaluate_next: bool,
__: bool,
__: bool,
chain: bool,
#[bits(default = true)]
interrupt_on_completion: bool,
#[bits(4)]
__: u8,
#[bits(6, default = TrbType::NoOp)]
trb_type: TrbType,
__: u16,
}
impl TypedTrb for TrbNoOp {}
#[repr(transparent)]
pub struct TrbRing(PhysicalBox<[TransferRequestBlock]>);
impl TrbRing {
pub fn new(size: usize) -> Self {
Self(PhysicalBox::default_with_count(
TransferRequestBlock::default(),
size,
))
}
pub fn len(&self) -> usize {
self.0.len()
}
pub fn physical_address(&self) -> usize {
self.0.physical_address()
}
}
impl<I> Index<I> for TrbRing
where
I: SliceIndex<[TransferRequestBlock]>,
{
type Output = I::Output;
fn index(&self, index: I) -> &Self::Output {
&self.0[index]
}
}
impl<I> IndexMut<I> for TrbRing
where
I: SliceIndex<[TransferRequestBlock]>,
{
fn index_mut(&mut self, index: I) -> &mut Self::Output {
&mut self.0[index]
}
}


@ -0,0 +1,48 @@
use core::{
ops::{Index, IndexMut},
slice::SliceIndex,
};
use mammoth::physical_box::PhysicalBox;
use crate::xhci::data_structures::TransferRequestBlock;
#[repr(transparent)]
pub struct TrbRingSegment(PhysicalBox<[TransferRequestBlock]>);
impl TrbRingSegment {
pub fn new(size: usize) -> Self {
Self(PhysicalBox::default_with_count(
TransferRequestBlock::default(),
size,
))
}
pub fn len(&self) -> usize {
self.0.len()
}
pub fn physical_address(&self) -> usize {
self.0.physical_address()
}
}
impl<I> Index<I> for TrbRingSegment
where
I: SliceIndex<[TransferRequestBlock]>,
{
type Output = I::Output;
fn index(&self, index: I) -> &Self::Output {
&self.0[index]
}
}
impl<I> IndexMut<I> for TrbRingSegment
where
I: SliceIndex<[TransferRequestBlock]>,
{
fn index_mut(&mut self, index: I) -> &mut Self::Output {
&mut self.0[index]
}
}


@ -0,0 +1,22 @@
use mammoth::mem::MemoryRegion;
pub struct DeviceContextBaseArray {
#[allow(dead_code)]
region: MemoryRegion,
physical_addr: usize,
}
impl DeviceContextBaseArray {
pub fn new() -> Self {
let (region, physical_addr) = MemoryRegion::contiguous_physical(0x1000).unwrap();
region.zero_region();
Self {
region,
physical_addr: physical_addr as usize,
}
}
pub fn physical_addr(&self) -> usize {
self.physical_addr
}
}
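Each DCBA entry is a 64-bit physical pointer, one per device slot, so a full 4K page holds all 256. A hedged sketch of a future accessor for installing a device context; set_slot is hypothetical and reuses the mut_ptr_at_offset helper MemoryRegion already provides:

// Hypothetical helper: entries are 8 bytes, so slot N lives at byte offset N * 8.
pub fn set_slot(&mut self, slot: usize, device_context_paddr: u64) {
    let entry: *mut u64 = self.region.mut_ptr_at_offset(slot * 8);
    // Volatile write: the xHC reads this table directly from memory.
    unsafe { entry.write_volatile(device_context_paddr) };
}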


@ -0,0 +1,236 @@
use core::ptr::NonNull;
use core::slice;
use mammoth::mem::map_direct_physical_and_leak;
use mammoth::{map_unaligned_volatile, mem::MemoryRegion};
use mammoth::{read_unaligned_volatile, write_unaligned_volatile};
use volatile::VolatilePtr;
use super::registers::{self};
use crate::xhci::data_structures::{TrbNoOp, TypedTrb};
use crate::xhci::device_context_base_array::DeviceContextBaseArray;
use crate::xhci::event_ring::EventRing;
use crate::xhci::registers::HostControllerOperational;
use crate::xhci::trb_ring::TrbRing;
pub struct XHCIDriver<'a> {
#[allow(dead_code)]
pci_device: pci::PciDevice,
capabilities: registers::HostControllerCapabilities,
// TODO: Fix alignment of this type so we can do more targeted reads and writes.
operational: VolatilePtr<'a, HostControllerOperational>,
registers_region: MemoryRegion,
command_ring: TrbRing,
event_ring: EventRing,
device_context_base_array: DeviceContextBaseArray,
}
impl XHCIDriver<'_> {
pub fn from_pci_device(mut pci_device: pci::PciDevice) -> Self {
// BAR0 Size Allocation
// If virtualization is supported, the Capability and Operational Register sets, and
// the Extended Capabilities may reside in a single page of virtual memory,
// however the RTSOFF and DBOFF Registers shall position the Runtime and
// Doorbell Registers to reside on their own respective virtual memory pages. The
// BAR0 size shall provide space that is sufficient to cover the offset between the
// respective register spaces (Capability, Operational, Runtime, etc.) and the
// register spaces themselves (e.g. a minimum of 3 virtual memory pages).
// If virtualization is not supported, all xHCI register spaces may reside on a single
// page pointed to by the BAR0.
let three_pages = 0x3000;
let address =
((pci_device.header().bars[1] as u64) << 32) | (pci_device.header().bars[0] as u64);
let registers_region = MemoryRegion::direct_physical(address, three_pages).unwrap();
let caps_ptr: *mut registers::HostControllerCapabilities =
map_direct_physical_and_leak(address, 0x1000);
let _port_cap = pci_device.register_msi().unwrap();
// SAFETY:
// - The pointer is valid.
// - No other thread has access in this block.
let capabilities = unsafe {
VolatilePtr::new(
// UNWRAP: We just constructed this object with a non-null value.
NonNull::new(caps_ptr).unwrap(),
)
.read()
};
let cap_length_and_version = capabilities.cap_length_and_version;
let operational_ptr = unsafe {
(caps_ptr as *mut u8).add(cap_length_and_version.cap_length() as usize)
as *mut HostControllerOperational
};
let operational: VolatilePtr<HostControllerOperational> =
unsafe { VolatilePtr::new(NonNull::new(operational_ptr).unwrap()) };
let mut driver = Self {
pci_device,
capabilities,
operational,
registers_region,
command_ring: TrbRing::new(),
event_ring: EventRing::new(),
device_context_base_array: DeviceContextBaseArray::new(),
};
driver.initialize();
driver.command_ring.enqueue_trb(TrbNoOp::new().to_trb());
driver.doorbells()[0].ring_command();
driver
}
fn interrupters(&self) -> &mut [registers::InterrupterRegisterSet] {
// See Table 5-35: Host Controller Runtime Registers
const INTERRUPTER_OFFSET_FROM_RUNTIME: u32 = 0x20;
let runtime = self.capabilities.runtime_register_space_offset;
let interrupter_offset = (runtime + INTERRUPTER_OFFSET_FROM_RUNTIME) as usize;
let params1 = self.capabilities.params_1;
// SAFETY: xHCI 5.5.2 defines a contiguous array of MaxIntrs interrupter
// register sets at this offset in the runtime register space.
unsafe {
slice::from_raw_parts_mut(
self.registers_region.mut_ptr_at_offset(interrupter_offset),
params1.max_interrupters() as usize,
)
}
}
fn doorbells(&mut self) -> &mut [registers::Doorbell] {
let doorbell_offset = self.capabilities.doorbell_offset;
let params1 = self.capabilities.params_1;
// SAFETY: xHCI 5.6 defines a contiguous array of doorbell registers
// starting at DBOFF.
unsafe {
slice::from_raw_parts_mut(
self.registers_region
.mut_ptr_at_offset(doorbell_offset as usize),
params1.max_device_slots() as usize,
)
}
}
fn initialize(&mut self) {
#[cfg(feature = "debug")]
mammoth::debug!("Stopping XHCI Controller.");
// Stop the host controller.
self.operational.update(|mut o| {
o.usb_command = o.usb_command.with_run_stop(false);
o
});
#[cfg(feature = "debug")]
mammoth::debug!("Waiting for controller to halt.");
// Sleep until the controller is halted.
let mut status = self.operational.read().usb_status;
while !status.host_controller_halted() {
// TODO: Sleep for how long?
mammoth::syscall::thread_sleep(50).unwrap();
status = self.operational.read().usb_status;
}
#[cfg(feature = "debug")]
mammoth::debug!("Resetting Controller.");
self.operational.update(|mut o| {
o.usb_command = o.usb_command.with_host_controller_reset(true);
o
});
let mut command: registers::UsbCommand = self.operational.read().usb_command;
while command.host_controller_reset() {
// TODO: Sleep for how long?
mammoth::syscall::thread_sleep(50).unwrap();
command = self.operational.read().usb_command;
}
#[cfg(feature = "debug")]
mammoth::debug!("XHCI Controller Reset, waiting ready.");
let mut status = self.operational.read().usb_status;
while status.controller_not_ready() {
// TODO: Sleep for how long?
mammoth::syscall::thread_sleep(50).unwrap();
status = self.operational.read().usb_status;
}
#[cfg(feature = "debug")]
mammoth::debug!("XHCI Controller Ready.");
#[cfg(feature = "debug")]
mammoth::debug!("Setting Command Ring");
self.operational.update(|mut o| {
// TODO: Split this struct to make it clearer that we are setting the cycle bit here.
o.command_ring_control = (self.command_ring.physical_base_address() | 1) as u64;
o
});
#[cfg(feature = "debug")]
mammoth::debug!("Setting DCBA.");
let params1 = self.capabilities.params_1;
self.operational.update(|mut o| {
o.device_context_base_address_array_pointer =
self.device_context_base_array.physical_addr() as u64;
// We tell the controller that we support as many slots as it does because
// we allocate a full 4K page to the DCBA, which holds 256 entries, while
// the maximum number of slots is 255.
o.configure = o
.configure
.with_max_device_slots_enabled(params1.max_device_slots());
o
});
let params2 = self.capabilities.params_2;
let max_scratchpad_buffers =
(params2.max_scratchpad_buffers_hi() << 5) | params2.max_scratchpad_buffers_lo();
assert!(
max_scratchpad_buffers == 0,
"Unsupported scratchpad buffers."
);
#[cfg(feature = "debug")]
mammoth::debug!("Setting up initial event ring.");
let interrupter0 = &mut self.interrupters()[0];
// SAFETY:
// - The HC was halted above.
// - The segment table is size 1.
unsafe {
interrupter0.set_event_ring(
self.event_ring.segment_table(),
self.event_ring.initial_erdp(),
);
}
write_unaligned_volatile!(
interrupter0,
interrupter_moderation,
registers::InterrupterModeration::new()
.with_interrupt_moderation_interval(4000)
.with_interrupt_moderation_counter(0)
);
write_unaligned_volatile!(
interrupter0,
interrupter_management,
registers::InterrupterManagement::new().with_interrupt_enabled(true)
);
self.operational.update(|mut o| {
o.usb_command = o
.usb_command
.with_run_stop(true)
.with_interrupter_enable(true);
o
});
#[cfg(feature = "debug")]
mammoth::debug!("Enabled interrupts and controller.");
}
}
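The No Op command enqueued above should produce a Command Completion Event on the primary event ring; this commit does not consume it yet. A hedged sketch of the eventual consumer loop, with peek_dequeue and consumer_cycle_state as hypothetical names:

// Poll until the TRB at the dequeue pointer has a cycle bit matching the
// consumer cycle state, i.e. the xHC has published a new event.
loop {
    let event = event_ring.peek_dequeue();
    if event.cycle() == event_ring.consumer_cycle_state() {
        // Handle the event, advance the dequeue pointer, then write the new
        // ERDP (clearing the Event Handler Busy bit) back to interrupter 0.
        break;
    }
    mammoth::syscall::thread_sleep(50).unwrap();
}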


@ -0,0 +1,29 @@
use alloc::vec::Vec;
use crate::xhci::data_structures::{EventRingSegmentTable, TrbRingSegment};
pub struct EventRing {
segment_table: EventRingSegmentTable,
segments: Vec<TrbRingSegment>,
}
impl EventRing {
pub fn new() -> Self {
let mut event_ring = Self {
segment_table: EventRingSegmentTable::new(1),
segments: [TrbRingSegment::new(100)].into(),
};
event_ring.segment_table[0].from_trb_ring(&event_ring.segments[0]);
event_ring
}
pub fn segment_table(&self) -> &EventRingSegmentTable {
&self.segment_table
}
pub fn initial_erdp(&self) -> usize {
self.segments[0].physical_address()
}
}


@ -1,2 +1,7 @@
pub mod data_structures;
pub mod registers;
mod data_structures;
mod device_context_base_array;
pub mod driver;
mod event_ring;
mod registers;
mod trb_ring;


@ -321,6 +321,7 @@ pub struct HCCParams2 {
///
/// These registers are located at the addresses specified in BAR0 and BAR1 in the PCI Header.
#[repr(C, packed)]
#[derive(Copy, Clone)]
pub struct HostControllerCapabilities {
pub cap_length_and_version: HostControllerCapabilitiesLengthAndVersion,
pub params_1: HCSParams1,


@ -63,3 +63,19 @@ pub struct Doorbell {
/// This field returns 0 when read
db_stream_id: u16,
}
impl Doorbell {
pub fn ring(&mut self, target: u8) {
// SAFETY:
// - We know this is a valid reference.
unsafe {
core::ptr::write_volatile(
self as *mut _,
Doorbell::new().with_db_target(target).with_db_stream_id(0),
);
}
}
pub fn ring_command(&mut self) {
self.ring(0)
}
}
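Doorbell 0 is the host controller doorbell, so a target of 0 kicks the command ring; doorbells 1..=MaxSlots belong to device slots, where the target selects an endpoint. A short usage sketch (the slot call is hypothetical until device slots are enabled):

driver.doorbells()[0].ring_command(); // notify the xHC that the command ring advanced
// Once a device slot exists: target 1 addresses the default control endpoint.
// driver.doorbells()[slot_id].ring(1);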


@ -1,4 +1,6 @@
use bitfield_struct::bitfield;
use mammoth::mem::MemoryRegion;
use volatile::VolatilePtr;
#[bitfield(u32)]
pub struct UsbCommand {
@ -273,6 +275,7 @@ pub struct UsbConfigure {
/// of the Capability Registers Length (CAPLENGTH) register (refer to Section 5.3.1)
/// to the Capability Base address. All registers are multiples of 32 bits in length
#[repr(C, packed)]
#[derive(Copy, Clone)]
pub struct HostControllerOperational {
pub usb_command: UsbCommand,
pub usb_status: UsbStatus,


@ -1,5 +1,7 @@
use bitfield_struct::bitfield;
use crate::xhci::data_structures::EventRingSegmentTable;
/// The Interrupter Management register allows system software to enable, disable,
/// and detect xHC interrupts.
///
@ -49,16 +51,6 @@ pub struct InterrupterModeration {
/// XHCI 5.5.2.3.1
#[bitfield(u32)]
pub struct EventRingSegmentTableSize {
/// Event Ring Segment Table Size RW. Default = 0. This field identifies the number of valid
/// Event Ring Segment Table entries in the Event Ring Segment Table pointed to by the Event Ring
/// Segment Table Base Address register. The maximum value supported by an xHC
/// implementation for this register is defined by the ERST Max field in the HCSPARAMS2 register
/// (5.3.4).
/// For Secondary Interrupters: Writing a value of 0 to this field disables the Event Ring. Any events
/// targeted at this Event Ring when it is disabled shall result in undefined behavior of the Event
/// Ring.
/// For the Primary Interrupter: Writing a value of 0 to this field shall result in undefined behavior
/// of the Event Ring. The Primary Event Ring cannot be disabled.
pub event_ring_segment_table_size: u16,
_reserved: u16,
}
@ -98,9 +90,18 @@ pub struct EventRingDequePointer {
pub struct InterrupterRegisterSet {
pub interrupter_management: InterrupterManagement,
pub interrupter_moderation: InterrupterModeration,
pub event_ring_segement_table_size: EventRingSegmentTableSize,
_reserved: u32,
/// Event Ring Segment Table Size RW. Default = 0. This field identifies the number of valid
/// Event Ring Segment Table entries in the Event Ring Segment Table pointed to by the Event Ring
/// Segment Table Base Address register. The maximum value supported by an xHC
/// implementation for this register is defined by the ERST Max field in the HCSPARAMS2 register
/// (5.3.4).
/// For Secondary Interrupters: Writing a value of 0 to this field disables the Event Ring. Any events
/// targeted at this Event Ring when it is disabled shall result in undefined behavior of the Event
/// Ring.
/// For the Primary Interrupter: Writing a value of 0 to this field shall result in undefined behavior
/// of the Event Ring. The Primary Event Ring cannot be disabled.
event_ring_segment_table_size: u32,
___: u32,
/// Event Ring Segment Table Base Address Register RW. Default = 0. This field defines the
/// high order bits of the start address of the Event Ring Segment Table.
/// Writing this register sets the Event Ring State Machine:EREP Advancement to the Start state.
@ -111,6 +112,41 @@ pub struct InterrupterRegisterSet {
/// NOTE: This must be aligned such that bits 0:5 are 0.
///
/// XHCI 5.5.2.3.2
pub event_ring_segment_table_base_address: u64,
pub event_ring_deque_pointer: u64,
event_ring_segment_table_base_address: u64,
event_ring_deque_pointer: u64,
}
impl InterrupterRegisterSet {
/// SAFETY:
/// - For the primary interrupter HC must be halted.
/// - The event rings size must be at most ERST_MAX from HCSPARAMS2
pub unsafe fn set_event_ring(
&mut self,
event_ring_segment_table: &EventRingSegmentTable,
event_ring_deque_pointer: usize,
) {
// NOTE: We must write the size before the base address, otherwise QEMU is unhappy.
// Not sure if this is required by the spec.
// SAFETY:
// - We know this address is valid and we have a mut reference to it.
unsafe {
core::ptr::write_volatile(
core::ptr::addr_of!(self.event_ring_segment_table_size) as *mut _,
event_ring_segment_table.len() as u32,
);
core::ptr::write_volatile(
core::ptr::addr_of!(self.event_ring_segment_table_base_address) as *mut _,
event_ring_segment_table.physical_address(),
);
core::ptr::write_volatile(
core::ptr::addr_of!(self.event_ring_deque_pointer) as *mut _,
event_ring_deque_pointer,
);
}
}
}
const _: () = assert!(size_of::<InterrupterRegisterSet>() == 0x20);
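Event consumption is later acknowledged through the same dequeue-pointer register. A hedged sketch of a future helper, with bit positions from xHCI 5.5.2.3.3:

/// Hypothetical helper: advance ERDP and clear Event Handler Busy (EHB, bit 3,
/// write-1-to-clear). Bits 2:0 hold the dequeue ERST segment index.
pub fn acknowledge(&mut self, new_dequeue_paddr: usize) {
    const EVENT_HANDLER_BUSY: usize = 1 << 3;
    // SAFETY: same volatile MMIO access pattern as set_event_ring above.
    unsafe {
        core::ptr::write_volatile(
            core::ptr::addr_of!(self.event_ring_deque_pointer) as *mut usize,
            new_dequeue_paddr | EVENT_HANDLER_BUSY,
        );
    }
}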


@ -0,0 +1,88 @@
use alloc::vec::Vec;
use crate::xhci::data_structures::{TransferRequestBlock, TrbLink, TrbRingSegment, TypedTrb};
#[derive(Default, Copy, Clone)]
struct TrbPointer {
/// Index into the vector of trb segments.
segment_index: usize,
/// Index into the specific segment.
/// This is a TransferRequestBlock index,
/// to get the physical_offset use segment_physical_offset()
segment_offset: usize,
}
impl TrbPointer {
fn segment_physical_offset(&self) -> usize {
self.segment_offset * size_of::<TransferRequestBlock>()
}
}
pub struct TrbRing {
segments: Vec<TrbRingSegment>,
enqueue_pointer: TrbPointer,
dequeue_pointer: TrbPointer,
cycle_bit: bool,
}
impl TrbRing {
pub fn new() -> Self {
Self {
// TODO: What size and count should this be?
segments: alloc::vec![TrbRingSegment::new(100)],
enqueue_pointer: TrbPointer::default(),
dequeue_pointer: TrbPointer::default(),
// Start with this as true so we are flipping bits from 0 (default) to 1
// to mark the enqueue pointer.
cycle_bit: true,
}
}
pub fn physical_base_address(&self) -> usize {
self.segments[0].physical_address()
}
pub fn enqueue_trb(&mut self, trb: TransferRequestBlock) {
*self.next_trb_ref() = trb.with_cycle(self.cycle_bit);
self.advance_enqueue_pointer();
}
fn next_trb_ref(&mut self) -> &mut TransferRequestBlock {
&mut self.segments[self.enqueue_pointer.segment_index][self.enqueue_pointer.segment_offset]
}
fn advance_enqueue_pointer(&mut self) {
self.enqueue_pointer.segment_offset += 1;
if self.enqueue_pointer.segment_offset
== self.segments[self.enqueue_pointer.segment_index].len() - 1
{
// We have reached the end of the segment, insert a link trb.
// Increment the segment index with wrapping.
let next_segment_index =
if self.enqueue_pointer.segment_index + 1 == self.segments.len() {
0
} else {
self.enqueue_pointer.segment_index + 1
};
let next_segment_pointer = self.segments[next_segment_index].physical_address();
let toggle_cycle = next_segment_index == 0;
*self.next_trb_ref() = TrbLink::new()
.with_ring_segment_pointer(next_segment_pointer as u64)
.with_cycle(self.cycle_bit)
.with_toggle_cycle(toggle_cycle)
.to_trb();
// Flip toggle cycle bit if necessary.
self.cycle_bit ^= toggle_cycle;
self.enqueue_pointer = TrbPointer {
segment_index: next_segment_index,
segment_offset: 0,
};
}
}
}
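A worked example of the wrap behavior with the single 100-entry segment allocated in new():

let mut ring = TrbRing::new();
// Offsets 0..=98 receive normal TRBs; offset 99 is reserved for the Link TRB.
for _ in 0..99 {
    ring.enqueue_trb(TrbNoOp::new().to_trb()); // stamped with cycle = 1
}
// The advance after the 99th enqueue wrote a Link TRB (toggle_cycle = true)
// at offset 99 and flipped cycle_bit, so this TRB lands back at offset 0
// with cycle = 0.
ring.enqueue_trb(TrbNoOp::new().to_trb());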