Rust XHCI Implementation.

This commit is contained in:
Drew 2025-12-05 22:01:13 -08:00
parent 8b022a6b24
commit 592b5b468f
20 changed files with 1064 additions and 205 deletions

View file

@ -6,3 +6,10 @@ edition = "2024"
[dependencies]
bitfield-struct = "0.12"
mammoth = { path = "../../lib/mammoth/" }
pci = { path = "../../lib/pci" }
volatile = "0.6.1"
yellowstone-yunq = { version = "0.1.0", path = "../../lib/yellowstone" }
[features]
default = ["debug"]
debug = []

View file

@ -5,12 +5,47 @@ extern crate alloc;
mod xhci;
use mammoth::{debug, define_entry, zion::z_err_t};
use alloc::sync::Arc;
use mammoth::{
cap::Capability,
debug, define_entry,
sync::Mutex,
task::{Executor, Task},
zion::z_err_t,
};
use pci::PciDevice;
use xhci::driver::XHCIDriver;
define_entry!();
#[unsafe(no_mangle)]
extern "C" fn main() -> z_err_t {
debug!("In Voyageurs");
#[cfg(feature = "debug")]
debug!("Voyageurs Starting.");
let yellowstone = yellowstone_yunq::from_init_endpoint();
let xhci_info = yellowstone
.get_xhci_info()
.expect("Failed to get XHCI info from yellowstone.");
let pci_device = PciDevice::from_cap(Capability::take(xhci_info.xhci_region)).unwrap();
let xhci_driver = Arc::new(XHCIDriver::from_pci_device(pci_device));
let executor = Arc::new(Mutex::new(Executor::new()));
let driver_clone = xhci_driver.clone();
let spawner = executor.lock().new_spawner();
let interrupt_thread = mammoth::thread::spawn(move || driver_clone.interrupt_loop(spawner));
executor
.lock()
.spawn(Task::new(async move { xhci_driver.startup().await }));
executor.lock().run();
interrupt_thread.join().unwrap();
0
}

View file

@ -0,0 +1,71 @@
use bitfield_struct::bitfield;
use crate::xhci::data_structures::{TransferRequestBlock, TrbType};
pub struct EnableSlotTrb {}
#[bitfield(u128)]
pub struct EnableSlotCommand {
__: u64,
__: u32,
#[bits(10)]
__: u16,
#[bits(6, default=TrbType::EnableSlotCommand)]
trb_type: TrbType,
#[bits(5)]
slot_type: u8,
#[bits(11)]
__: u16,
}
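// Layout sketch (relying on bitfield-struct's LSB-first field order): bits
// 0..=95 are reserved, trb_type occupies TRB bits 106..=111 (bits 10..=15 of
// the fourth dword), and slot_type bits 112..=116, matching the xHCI Enable
// Slot Command TRB layout.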
impl From<EnableSlotCommand> for CommandTrb {
fn from(value: EnableSlotCommand) -> Self {
Self(value.into_bits().into())
}
}
#[bitfield(u128)]
pub struct AddressDeviceCommand {
pub input_context_pointer: u64,
__: u32,
#[bits(9)]
__: u16,
pub block_set_address_request: bool,
#[bits(6, default=TrbType::AddressDeviceCommand)]
trb_type: TrbType,
__: u8,
pub slot_id: u8,
}
impl From<AddressDeviceCommand> for CommandTrb {
fn from(value: AddressDeviceCommand) -> Self {
Self(value.into_bits().into())
}
}
#[bitfield(u128)]
pub struct NoOpCommand {
__: u64,
__: u32,
cycle: bool,
#[bits(9)]
__: u16,
#[bits(6, default = TrbType::NoOpCommand)]
trb_type: TrbType,
__: u16,
}
impl From<NoOpCommand> for CommandTrb {
fn from(value: NoOpCommand) -> Self {
Self(value.into_bits().into())
}
}
/// Simple type to ensure we are only sending commands to command rings.
pub struct CommandTrb(TransferRequestBlock);
impl From<CommandTrb> for TransferRequestBlock {
fn from(value: CommandTrb) -> Self {
value.0
}
}
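// Usage sketch (illustrative, not part of this commit): the From impls above
// make CommandTrb the only currency CommandRing accepts, so well-typed code
// cannot post an event or transfer TRB to the command ring:
//
//     let trb: CommandTrb = EnableSlotCommand::new().into();
//     command_ring.enqueue_command(trb); // OK
//     // command_ring.enqueue_command(TransferEvent::new()); // does not compile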

View file

@ -0,0 +1,139 @@
use bitfield_struct::{bitenum, bitfield};
use crate::xhci::data_structures::{TransferRequestBlock, TrbType};
#[bitenum]
#[repr(u8)]
#[derive(Debug, Eq, PartialEq)]
pub enum CommandCompletionCode {
#[fallback]
#[allow(dead_code)]
Invalid = 0,
Success = 1,
}
#[bitfield(u128)]
pub struct TransferEvent {
pub transfer_trb_pointer: u64,
#[bits(24)]
pub trb_transfer_length: u32,
/// Completion Code. This field encodes the completion status of the command that generated the
/// event. Refer to the respective command definition for a list of the possible Completion Codes
/// associated with the command. Refer to section 6.4.5 for an enumerated list of possible error
/// conditions.
#[bits(8)]
pub completion_code: CommandCompletionCode,
#[bits(10)]
__: u16,
/// TRB Type. This field identifies the type of the TRB. Refer to Table 6-91 for the definition of the
/// Transfer Event TRB type ID.
#[bits(6, default=TrbType::TransferEvent)]
pub trb_type: TrbType,
#[bits(5)]
pub endpoint_id: u8,
#[bits(3)]
__: u8,
pub slot_id: u8,
}
#[bitfield(u128)]
pub struct CommandCompletionEvent {
/// Command TRB Pointer. This field holds the 64-bit address of the Command TRB that generated
/// this event. Note that this field is not valid for some Completion Code values. Refer to
/// Table 6-90 for specific cases.
///
/// The memory structure referenced by this physical memory pointer shall be aligned on a 16-byte
/// address boundary.
pub command_trb_pointer: u64,
/// Command Completion Parameter. This field may optionally be set by a command. Refer to
/// section 4.6.6.1 for specific usage. If a command does not utilize this field it shall be treated as
/// RsvdZ.
#[bits(24)]
pub command_completion_parameter: u64,
/// Completion Code. This field encodes the completion status of the command that generated the
/// event. Refer to the respective command definition for a list of the possible Completion Codes
/// associated with the command. Refer to section 6.4.5 for an enumerated list of possible error
/// conditions.
#[bits(8)]
pub completion_code: CommandCompletionCode,
/// Cycle bit (C). This bit is used to mark the Dequeue Pointer of an Event Ring
pub cycle_bit: bool,
#[bits(9)]
__: u16,
/// TRB Type. This field identifies the type of the TRB. Refer to Table 6-91 for the definition of the
/// Command Completion Event TRB type ID
#[bits(6, default=TrbType::CommandCompletionEvent)]
pub trb_type: TrbType,
/// VF ID. The ID of the Virtual Function that generated the event. Note that this field is valid only if
/// Virtual Functions are enabled. If they are not enabled this field shall be cleared to 0.
pub vf_id: u8,
/// Slot ID. The Slot ID field shall be updated by the xHC to reflect the slot associated with the
/// command that generated the event, with the following exceptions:
///
/// - The Slot ID shall be cleared to 0 for No Op, Set Latency Tolerance Value, Get Port Bandwidth,
/// and Force Event Commands.
///
/// - The Slot ID shall be set to the ID of the newly allocated Device Slot for the Enable Slot
/// Command.
///
/// - The value of Slot ID shall be vendor defined when generated by a vendor defined command.
///
/// This value is used as an index in the Device Context Base Address Array to select the Device
/// Context of the source device. If this Event is due to a Host Controller Command, then this field
/// shall be cleared to 0.
pub slot_id: u8,
}
#[bitfield(u128)]
pub struct PortStatusChangeEvent {
#[bits(24)]
__: u32,
pub port_id: u8,
__: u32,
#[bits(24)]
__: u32,
#[bits(8)]
pub completion_code: CommandCompletionCode,
#[bits(10)]
__: u16,
#[bits(6, default=TrbType::PortStatusChangeEvent)]
trb_type: TrbType,
__: u16,
}
pub enum EventTrb {
Transfer(TransferEvent),
CommandCompletion(CommandCompletionEvent),
PortStatusChange(PortStatusChangeEvent),
BandwidthRequest(TransferRequestBlock),
Doorbell(TransferRequestBlock),
HostController(TransferRequestBlock),
DeviceNotification(TransferRequestBlock),
MFINDEXWrap(TransferRequestBlock),
}
impl From<TransferRequestBlock> for EventTrb {
fn from(value: TransferRequestBlock) -> Self {
match value.trb_type() {
TrbType::TransferEvent => {
EventTrb::Transfer(TransferEvent::from_bits(value.into_bits()))
}
TrbType::CommandCompletionEvent => {
EventTrb::CommandCompletion(CommandCompletionEvent::from_bits(value.into_bits()))
}
TrbType::PortStatusChangeEvent => {
EventTrb::PortStatusChange(PortStatusChangeEvent::from_bits(value.into_bits()))
}
TrbType::BandwidthRequestEvent => EventTrb::BandwidthRequest(value),
TrbType::DoorbellEvent => EventTrb::Doorbell(value),
TrbType::HostControllerEvent => EventTrb::HostController(value),
TrbType::DeviceNotificationEvent => EventTrb::DeviceNotification(value),
TrbType::MFINDEXWrapEvent => EventTrb::MFINDEXWrap(value),
t => panic!("Unknown trb type on event ring: {:?}", t),
}
}
}

View file

@ -1,14 +1,18 @@
mod command_trb;
mod device_context;
mod endpoint_context;
mod event_ring_segment_table;
mod event_trb;
mod input_context;
mod slot_context;
mod trb;
mod trb_ring_segment;
pub use command_trb::*;
pub use device_context::*;
pub use endpoint_context::*;
pub use event_ring_segment_table::*;
pub use event_trb::*;
pub use input_context::*;
pub use slot_context::*;
pub use trb::*;

View file

@ -75,29 +75,6 @@ where
}
}
#[bitfield(u128)]
pub struct TrbNoOp {
__: u64,
#[bits(22)]
__: u32,
#[bits(10, default = 0)]
interrupter_target: u16,
cycle: bool,
evaluate_next: bool,
__: bool,
__: bool,
chain: bool,
#[bits(default = true)]
interrupt_on_completion: bool,
#[bits(4)]
__: u8,
#[bits(6, default = TrbType::NoOpCommand)]
trb_type: TrbType,
__: u16,
}
impl TypedTrb for TrbNoOp {}
#[bitfield(u128)]
pub struct TrbLink {
/// Ring Segment Pointer Hi and Lo. These fields represent the high order bits of the 64-bit base
@ -141,139 +118,3 @@ pub struct TrbLink {
}
impl TypedTrb for TrbLink {}
#[bitfield(u128)]
pub struct TrbTransferEvent {
pub transfer_trb_pointer: u64,
#[bits(24)]
pub trb_transfer_length: u32,
/// Completion Code. This field encodes the completion status of the command that generated the
/// event. Refer to the respective command definition for a list of the possible Completion Codes
/// associated with the command. Refer to section 6.4.5 for an enumerated list of possible error
/// conditions.
pub completion_code: u8,
#[bits(10)]
__: u16,
/// TRB Type. This field identifies the type of the TRB. Refer to Table 6-91 for the definition of the
/// Transfer Event TRB type ID.
#[bits(6, default=TrbType::TransferEvent)]
pub trb_type: TrbType,
#[bits(5)]
pub endpoint_id: u8,
#[bits(3)]
__: u8,
pub slot_id: u8,
}
impl TypedTrb for TrbTransferEvent {}
#[bitenum]
#[repr(u8)]
pub enum CommandCompletionCode {
#[fallback]
#[allow(dead_code)]
Invalid = 0,
Success = 1,
}
#[bitfield(u128)]
pub struct TrbCommandCompletion {
/// Command TRB Pointer Hi and Lo. This field represents the high order bits of the 64-bit address
/// of the Command TRB that generated this event. Note that this field is not valid for some
/// Completion Code values. Refer to Table 6-90 for specific cases.
///
/// The memory structure referenced by this physical memory pointer shall be aligned on a 16-byte
/// address boundary.
pub command_trb_pointer: u64,
/// Command Completion Parameter. This field may optionally be set by a command. Refer to
/// section 4.6.6.1 for specific usage. If a command does not utilize this field it shall be treated as
/// RsvdZ.
#[bits(24)]
pub command_completion_parameter: u64,
/// Completion Code. This field encodes the completion status of the command that generated the
/// event. Refer to the respective command definition for a list of the possible Completion Codes
/// associated with the command. Refer to section 6.4.5 for an enumerated list of possible error
/// conditions.
pub completion_code: u8,
/// Cycle bit (C). This bit is used to mark the Dequeue Pointer of an Event Ring
pub cycle_bit: bool,
#[bits(9)]
__: u16,
/// TRB Type. This field identifies the type of the TRB. Refer to Table 6-91 for the definition of the
/// Command Completion Event TRB type ID
#[bits(6, default=TrbType::CommandCompletionEvent)]
pub trb_type: TrbType,
/// VF ID. The ID of the Virtual Function that generated the event. Note that this field is valid only if
/// Virtual Functions are enabled. If they are not enabled this field shall be cleared to 0.
pub vf_id: u8,
/// Slot ID. The Slot ID field shall be updated by the xHC to reflect the slot associated with the
/// command that generated the event, with the following exceptions:
///
/// - The Slot ID shall be cleared to 0 for No Op, Set Latency Tolerance Value, Get Port Bandwidth,
/// and Force Event Commands.
///
/// - The Slot ID shall be set to the ID of the newly allocated Device Slot for the Enable Slot
/// Command.
///
/// - The value of Slot ID shall be vendor defined when generated by a vendor defined command.
///
/// This value is used as an index in the Device Context Base Address Array to select the Device
/// Context of the source device. If this Event is due to a Host Controller Command, then this field
/// shall be cleared to 0.
pub slot_id: u8,
}
impl TypedTrb for TrbCommandCompletion {}
#[bitfield(u128)]
pub struct TrbPortStatusChangeEvent {
#[bits(24)]
__: u32,
pub port_id: u8,
__: u32,
#[bits(24)]
__: u32,
pub completion_code: u8,
#[bits(10)]
__: u16,
#[bits(6, default=TrbType::PortStatusChangeEvent)]
trb_type: TrbType,
__: u16,
}
impl TypedTrb for TrbPortStatusChangeEvent {}
#[bitfield(u128)]
pub struct TrbEnableSlotCommand {
__: u64,
__: u32,
#[bits(10)]
__: u16,
#[bits(6, default=TrbType::EnableSlotCommand)]
trb_type: TrbType,
#[bits(5)]
slot_type: u8,
#[bits(11)]
__: u16,
}
impl TypedTrb for TrbEnableSlotCommand {}
#[bitfield(u128)]
pub struct TrbAddressDeviceCommand {
pub input_context_pointer: u64,
__: u32,
#[bits(9)]
__: u16,
pub block_set_address_request: bool,
#[bits(6, default=TrbType::AddressDeviceCommand)]
trb_type: TrbType,
__: u8,
pub slot_id: u8,
}
impl TypedTrb for TrbAddressDeviceCommand {}

View file

@ -0,0 +1,113 @@
use alloc::boxed::Box;
use mammoth::physical_box::PhysicalBox;
use crate::xhci::{
data_structures::{
DeviceContext, EndpointContextFields, EndpointState, EndpointType, InputContext,
TRDequeuePointer, TransferEvent,
},
registers::DoorbellPointer,
trb_ring::TrbRing,
};
struct DeviceContextBaseArray(PhysicalBox<[u64]>);
impl DeviceContextBaseArray {
pub fn new(max_slots: u8) -> Self {
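// max_slots + 1 entries because DCBAA entry 0 is reserved by the xHCI spec
// (it holds the scratchpad buffer array pointer when scratchpads are in use),
// so device slots 1..=max_slots map to entries 1..=max_slots.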
Self(PhysicalBox::default_with_count(0, max_slots as usize + 1))
}
}
pub struct DeviceSlot {
device_context: PhysicalBox<DeviceContext>,
endpoint_0_transfer_ring: TrbRing<TransferEvent>,
doorbell: DoorbellPointer,
}
impl DeviceSlot {
fn new(doorbell: DoorbellPointer) -> Self {
Self {
device_context: PhysicalBox::new(DeviceContext::default()),
endpoint_0_transfer_ring: TrbRing::new(),
doorbell,
}
}
pub fn send_control_command(&mut self) {
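// Doorbell target 1 selects the Default Control Endpoint (EP0, DCI 1) for
// this slot, telling the xHC to work the endpoint 0 transfer ring.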
self.doorbell.ring(1);
}
}
pub struct DeviceSlotManager {
device_context_base_array: DeviceContextBaseArray,
slots: Box<[Option<DeviceSlot>]>,
doorbells: Box<[Option<DoorbellPointer>]>,
}
impl DeviceSlotManager {
pub fn new(max_slots: u8, doorbells: Box<[DoorbellPointer]>) -> Self {
assert!(
doorbells.len() == max_slots as usize,
"Got an incorrect doorbell slice size."
);
Self {
device_context_base_array: DeviceContextBaseArray::new(max_slots),
slots: core::iter::repeat_with(|| None)
.take(max_slots as usize)
.collect(),
doorbells: doorbells.into_iter().map(Some).collect(),
}
}
pub fn device_context_base_array_physical_address(&self) -> usize {
self.device_context_base_array.0.physical_address()
}
/// Prepares a slot and an input context for an address device command.
///
/// Follows section 4.6.5 of the XHCI spec.
pub fn prep_slot_for_address_device(
&mut self,
slot_id: u8,
port_number: u8,
) -> PhysicalBox<InputContext> {
// TODO: Ensure alignment
let device_slot = DeviceSlot::new(
self.doorbells[(slot_id - 1) as usize]
.take()
.expect("Slot already allocated."),
);
let mut input_context = PhysicalBox::new(InputContext::default());
// The Add Context flags for the Slot Context and the Endpoint 0 Context shall be set to 1.
input_context.input_control_context.add_context_flags = 0x3;
// See XHCI 4.5.2 for information
input_context.slot_context.fields = input_context
.slot_context
.fields
.with_root_hub_port_number(port_number)
.with_route_string(0)
.with_context_entries(1)
.with_interrupter_target(0);
// The Endpoint 0 Context data structure in the
// Input Context shall define valid values for the TR Dequeue Pointer, EP Type, Error
// Count (CErr), and Max Packet Size fields. The MaxPStreams, Max Burst Size, and
// EP State values shall be cleared to '0'
input_context.endpoint_context_0.tr_deque_pointer = TRDequeuePointer::new()
.with_pointer(device_slot.endpoint_0_transfer_ring.physical_base_address() as u64)
.with_dequeue_cycle_state(true);
input_context.endpoint_context_0.fields = EndpointContextFields::new()
.with_endpoint_type(EndpointType::Control)
.with_max_primary_streams(0)
.with_max_burst_size(0)
.with_endpoint_state(EndpointState::Disabled);
self.device_context_base_array.0[slot_id as usize] =
device_slot.device_context.physical_address() as u64;
self.slots[slot_id as usize - 1] = Some(device_slot);
input_context
}
}

View file

@ -0,0 +1,282 @@
use alloc::sync::Arc;
use mammoth::sync::Mutex;
use mammoth::task::Spawner;
use mammoth::task::Task;
use super::registers::{self};
use crate::xhci::data_structures::AddressDeviceCommand;
use crate::xhci::data_structures::CommandCompletionCode;
use crate::xhci::data_structures::CommandCompletionEvent;
use crate::xhci::data_structures::CommandTrb;
use crate::xhci::data_structures::EnableSlotCommand;
use crate::xhci::data_structures::EventTrb;
use crate::xhci::data_structures::NoOpCommand;
use crate::xhci::data_structures::PortStatusChangeEvent;
use crate::xhci::device_context_base_array::DeviceSlotManager;
use crate::xhci::interrupter::Interrupter;
use crate::xhci::registers::DoorbellPointer;
use crate::xhci::registers::HostControllerOperationalWrapper;
use crate::xhci::registers::InterrupterRegisterSet;
use crate::xhci::registers::PortStatusAndControl;
use crate::xhci::trb_ring::CommandRing;
pub struct XHCIDriver {
#[allow(dead_code)]
pci_device: pci::PciDevice,
capabilities: registers::HostControllerCapabilities,
operational: HostControllerOperationalWrapper,
command_ring: Mutex<CommandRing>,
// TODO: Add multiple interrupters.
interrupter: Mutex<Interrupter>,
device_slot_manager: Mutex<DeviceSlotManager>,
}
impl XHCIDriver {
pub fn from_pci_device(mut pci_device: pci::PciDevice) -> Self {
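// Assemble the 64-bit MMIO base from BAR0 (low dword) and BAR1 (high dword).
// Assumption: this is a 64-bit memory BAR whose low type/flag bits are zero,
// i.e. the base is at least page aligned.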
let address =
((pci_device.header().bars[1] as usize) << 32) | (pci_device.header().bars[0] as usize);
let irq_port_cap = pci_device.register_msi().unwrap();
let (operational, capabilities) = HostControllerOperationalWrapper::new(address);
let max_slots = capabilities.params_1.max_device_slots();
let doorbell_physical = address + capabilities.doorbell_offset as usize;
let (command_doorbell, slot_doorbells) =
DoorbellPointer::create_command_and_slots(doorbell_physical, max_slots);
// The interrupter register sets start 0x20 bytes into the runtime register
// space, past the MFINDEX register block. One InterrupterRegisterSet is the
// same 0x20 bytes, so we map two sets' worth and `add(1)` past the first.
let interrupter_registers = mammoth::mem::map_direct_physical_and_leak(
address + capabilities.runtime_register_space_offset as usize,
size_of::<InterrupterRegisterSet>() * 2,
);
let interrupter_registers = unsafe { interrupter_registers.add(1) };
let mut driver = Self {
pci_device,
capabilities,
operational,
command_ring: Mutex::new(CommandRing::new(command_doorbell)),
interrupter: Mutex::new(Interrupter::new(interrupter_registers, irq_port_cap)),
device_slot_manager: Mutex::new(DeviceSlotManager::new(max_slots, slot_doorbells)),
};
driver.initialize();
driver
}
fn initialize(&mut self) {
#[cfg(feature = "debug")]
mammoth::debug!("Stopping XHCI Controller.");
// Stop the host controller.
self.operational
.update_command(|cmd| cmd.with_run_stop(false));
#[cfg(feature = "debug")]
mammoth::debug!("Waiting for controller to halt.");
// Sleep until the controller is halted.
let mut status = self.operational.read_status();
while !status.host_controller_halted() {
// TODO: Sleep for how long?
mammoth::syscall::thread_sleep(50).unwrap();
status = self.operational.read_status();
}
#[cfg(feature = "debug")]
mammoth::debug!("Resetting Controller.");
self.operational
.update_command(|cmd| cmd.with_host_controller_reset(true));
let mut command: registers::UsbCommand = self.operational.read_command();
while command.host_controller_reset() {
// TODO: Sleep for how long?
mammoth::syscall::thread_sleep(50).unwrap();
command = self.operational.read_command();
}
#[cfg(feature = "debug")]
mammoth::debug!("XHCI Controller Reset, waiting ready.");
let mut status = self.operational.read_status();
while status.controller_not_ready() {
// TODO: Sleep for how long?
mammoth::syscall::thread_sleep(50).unwrap();
status = self.operational.read_status();
}
#[cfg(feature = "debug")]
mammoth::debug!("XHCI Controller Ready.");
#[cfg(feature = "debug")]
mammoth::debug!("Setting Command Ring");
self.operational.set_command_ring_dequeue_pointer(
self.command_ring.lock().trb_ring.physical_base_address(),
true,
);
#[cfg(feature = "debug")]
mammoth::debug!("Setting DCBA.");
self.operational
.set_device_context_base_address_array_pointer(
self.device_slot_manager
.lock()
.device_context_base_array_physical_address(),
);
// We tell the controller that we can support as many slots as it does: the
// DCBA gets a full 4K page, and at 8 bytes per entry that holds 512 entries,
// well above the 256 needed for the maximum of 255 slots plus reserved entry 0.
self.operational.update_configure(|cfg| {
cfg.with_max_device_slots_enabled(self.capabilities.params_1.max_device_slots())
});
assert!(
self.capabilities.params_2.max_scratchpad_buffers() == 0,
"Unsupported scratchpad buffers."
);
#[cfg(feature = "debug")]
mammoth::debug!("Resetting event ring.");
// SAFETY: The HC is stopped.
unsafe { self.interrupter.lock().reset() };
self.operational
.update_command(|cmd| cmd.with_run_stop(true).with_interrupter_enable(true));
#[cfg(feature = "debug")]
mammoth::debug!("Enabled interrupts and controller.");
}
pub fn interrupt_loop(self: Arc<Self>, spawner: Spawner) {
let completion_handler = |trb| {
self.clone().handle_completion(spawner.clone(), trb);
};
self.interrupter.lock().interrupt_loop(completion_handler);
}
fn handle_completion(self: Arc<XHCIDriver>, spawner: Spawner, trb: EventTrb) {
match trb {
EventTrb::Transfer(trb) => {
mammoth::debug!("Unhandled transfer event: {:?}", trb);
}
EventTrb::CommandCompletion(trb) => {
self.command_ring
.lock()
.trb_ring
.handle_completion(trb, trb.command_trb_pointer() as usize);
}
EventTrb::PortStatusChange(trb) => {
let self_clone = self.clone();
spawner.spawn(Task::new(async move {
self_clone.port_status_change(trb).await
}));
}
EventTrb::BandwidthRequest(trb) => {
mammoth::debug!("Unhandled bandwidth request event: {:?}", trb);
}
EventTrb::Doorbell(trb) => {
mammoth::debug!("Unhandled doorbell event: {:?}", trb);
}
EventTrb::HostController(trb) => {
mammoth::debug!("Unhandled host controller event: {:?}", trb);
}
EventTrb::DeviceNotification(trb) => {
mammoth::debug!("Unhandled device notification event: {:?}", trb);
}
EventTrb::MFINDEXWrap(trb) => {
mammoth::debug!("Unhandled MFINDEX wrap event: {:?}", trb);
}
}
}
async fn send_command(&self, trb: CommandTrb) -> CommandCompletionEvent {
// Split the future and the await so the lock is dropped before we await.
let future = { self.command_ring.lock().enqueue_command(trb) };
future.await
}
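// A sketch of why the split above matters: the fused form
//
//     self.command_ring.lock().enqueue_command(trb).await
//
// would hold the command_ring guard across the await point, while the
// completion that resolves the future arrives via handle_completion() on the
// interrupt thread, which also locks command_ring. The driver would deadlock.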
pub async fn startup(&self) {
#[cfg(feature = "debug")]
mammoth::debug!("Sending no op command.");
let result = self.send_command(NoOpCommand::new().into()).await;
assert_eq!(result.completion_code(), CommandCompletionCode::Success);
#[cfg(feature = "debug")]
mammoth::debug!("Successfully tested no op command.");
#[cfg(feature = "debug")]
mammoth::debug!("Resetting all connected ports.");
for port_index in 0..self.operational.num_ports() {
self.operational
.update_port_status(port_index, |p| p.clear_change_bits());
}
for port_index in 0..self.operational.num_ports() {
let status = self.operational.get_port(port_index).status_and_control;
if status.port_power() && status.current_connect_status() {
mammoth::debug!("Resetting port {}", port_index);
self.operational.update_port_status(port_index, |_| {
PortStatusAndControl::new()
.with_port_reset(true)
.with_port_power(true)
});
}
}
}
async fn port_status_change(self: Arc<Self>, status_change: PortStatusChangeEvent) {
// Ports are indexed from 1.
let port_id = status_change.port_id();
let port_index = (port_id - 1) as usize;
let port_status = self.operational.get_port(port_index).status_and_control;
#[cfg(feature = "debug")]
mammoth::debug!("Port status change for port {}", port_id);
if !port_status.port_reset_change() {
mammoth::debug!(
"Unknown port status event, not handling. status= {:?}",
port_status
);
return;
}
self.operational
.update_port_status(port_index, |s| s.clear_change_bits());
#[cfg(feature = "debug")]
mammoth::debug!("Enabling slot.");
let resp = self.send_command(EnableSlotCommand::new().into()).await;
assert_eq!(resp.completion_code(), CommandCompletionCode::Success);
let slot = resp.slot_id();
#[cfg(feature = "debug")]
mammoth::debug!("Creating slot data structures in slot {}.", slot);
let input_context = self
.device_slot_manager
.lock()
.prep_slot_for_address_device(slot, port_id);
#[cfg(feature = "debug")]
mammoth::debug!("Sending address device.");
let resp = self
.send_command(
AddressDeviceCommand::new()
.with_slot_id(slot)
.with_input_context_pointer(input_context.physical_address() as u64)
.into(),
)
.await;
assert_eq!(resp.completion_code(), CommandCompletionCode::Success);
}
}

View file

@ -0,0 +1,70 @@
use alloc::vec::Vec;
use crate::xhci::{
data_structures::{EventRingSegmentTable, EventTrb, TransferRequestBlock, TrbRingSegment},
trb_ring::TrbPointer,
};
pub struct EventRing {
segment_table: EventRingSegmentTable,
segments: Vec<TrbRingSegment>,
cycle_bit: bool,
trb_pointer: TrbPointer,
}
impl EventRing {
pub fn new() -> Self {
// Software maintains an Event Ring Consumer Cycle State (CCS) bit, initializing it
// to 1...
let cycle_bit = true;
let mut event_ring = Self {
segment_table: EventRingSegmentTable::new(1),
segments: [TrbRingSegment::new(100)].into(),
cycle_bit,
trb_pointer: TrbPointer::default(),
};
event_ring.segment_table[0].update_from_trb_ring(&event_ring.segments[0]);
event_ring
}
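// Consumer Cycle State sketch: a TRB is valid to consume only while its cycle
// bit equals our CCS. Once the dequeue pointer wraps past the last segment,
// CCS flips, so TRBs left over from the previous lap (still carrying the old
// cycle value) read as "not yet written" in get_next().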
pub fn segment_table(&self) -> &EventRingSegmentTable {
&self.segment_table
}
pub fn erdp_physical_address(&self) -> usize {
self.segments[self.trb_pointer.segment_index].physical_address()
+ self.trb_pointer.segment_physical_offset()
}
fn current_trb(&self) -> TransferRequestBlock {
// TODO: These should be volatile reads.
self.segments[self.trb_pointer.segment_index][self.trb_pointer.segment_offset]
}
fn increment_pointer(&mut self) {
self.trb_pointer.segment_offset += 1;
if self.trb_pointer.segment_offset == self.segments[self.trb_pointer.segment_index].len() {
self.trb_pointer.segment_index += 1;
self.trb_pointer.segment_offset = 0;
if self.trb_pointer.segment_index == self.segments.len() {
// Wrap around to front.
self.trb_pointer.segment_index = 0;
self.cycle_bit = !self.cycle_bit;
}
}
}
pub fn get_next(&mut self) -> Option<EventTrb> {
let curr = self.current_trb();
if curr.cycle() != self.cycle_bit {
None
} else {
self.increment_pointer();
Some(curr.into())
}
}
}

View file

@ -0,0 +1,60 @@
use core::ptr::NonNull;
use mammoth::cap::Capability;
use crate::xhci::{
data_structures::{EventTrb, TransferRequestBlock},
event_ring::EventRing,
registers::{InterrupterModeration, InterrupterRegisterSet, InterrupterRegisters},
};
pub struct Interrupter {
event_ring: EventRing,
register_set: InterrupterRegisters,
irq_port_cap: Capability,
}
impl Interrupter {
pub fn new(
interrupter_register_set: NonNull<InterrupterRegisterSet>,
irq_port_cap: Capability,
) -> Self {
Self {
event_ring: EventRing::new(),
register_set: InterrupterRegisters::new(interrupter_register_set),
irq_port_cap,
}
}
// SAFETY:
// - The HC must be halted for interrupter 0.
pub unsafe fn reset(&mut self) {
// SAFETY:
// - The segment table is size 1.
unsafe {
self.register_set.set_event_ring(
self.event_ring.segment_table(),
self.event_ring.erdp_physical_address(),
);
}
self.register_set.set_moderation(
InterrupterModeration::new()
.with_interrupt_moderation_interval(4000)
.with_interrupt_moderation_counter(0),
);
self.register_set.enable_interrupts();
}
pub fn interrupt_loop(&mut self, completion_handler: impl Fn(EventTrb)) {
loop {
let _ = mammoth::syscall::port_recv(&self.irq_port_cap, &mut [], &mut []).unwrap();
while let Some(trb) = self.event_ring.get_next() {
completion_handler(trb);
}
self.register_set
.update_dequeue_pointer_clearing_busy(self.event_ring.erdp_physical_address());
}
}
}

View file

@ -1,2 +1,7 @@
pub mod data_structures;
pub mod registers;
mod data_structures;
mod device_context_base_array;
pub mod driver;
mod event_ring;
mod interrupter;
mod registers;
mod trb_ring;

View file

@ -0,0 +1,188 @@
use core::task::{Poll, Waker};
use alloc::{collections::vec_deque::VecDeque, sync::Arc, vec::Vec};
use mammoth::sync::Mutex;
use crate::xhci::{
data_structures::{
CommandCompletionEvent, CommandTrb, TransferRequestBlock, TrbLink, TrbRingSegment, TypedTrb,
},
registers::DoorbellPointer,
};
struct TrbFutureState<T> {
/// Physical Address for the enqueued TRB.
/// Used for sanity checking.
physical_address: usize,
waker: Option<Waker>,
response: Option<T>,
}
#[derive(Clone)]
pub struct TrbFuture<T> {
state: Arc<Mutex<TrbFutureState<T>>>,
}
impl<T> TrbFuture<T> {
fn new(paddr: usize) -> Self {
Self {
state: Arc::new(Mutex::new(TrbFutureState {
physical_address: paddr,
waker: None,
response: None,
})),
}
}
}
impl<T: Copy> Future for TrbFuture<T> {
type Output = T;
fn poll(
self: core::pin::Pin<&mut Self>,
cx: &mut core::task::Context<'_>,
) -> core::task::Poll<Self::Output> {
let mut state = self.state.lock();
match state.response {
Some(trb) => Poll::Ready(trb),
None => {
state.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
}
#[derive(Default, Copy, Clone, Debug)]
pub struct TrbPointer {
/// Index into the vector of trb segments.
pub segment_index: usize,
/// Index into the specific segment.
/// This is a TransferRequestBlock index; use segment_physical_offset() to
/// convert it to a byte offset within the segment.
pub segment_offset: usize,
}
impl TrbPointer {
pub fn segment_physical_offset(&self) -> usize {
self.segment_offset * size_of::<TransferRequestBlock>()
}
}
pub struct TrbRing<T> {
segments: Vec<TrbRingSegment>,
enqueue_pointer: TrbPointer,
cycle_bit: bool,
pending_futures: VecDeque<TrbFuture<T>>,
}
impl<T: Clone> TrbRing<T> {
pub fn new() -> Self {
Self {
// TODO: What segment size and count should this ring use?
segments: alloc::vec![TrbRingSegment::new(100)],
enqueue_pointer: TrbPointer::default(),
// Start with this as true so we are flipping bits from 0 (default) to 1
// to mark the enqueue pointer.
cycle_bit: true,
pending_futures: VecDeque::new(),
}
}
pub fn physical_base_address(&self) -> usize {
self.segments[0].physical_address()
}
fn physical_address_of_enqueue_pointer(&self) -> usize {
self.segments[self.enqueue_pointer.segment_index].physical_address()
+ self.enqueue_pointer.segment_physical_offset()
}
pub fn enqueue_trb(&mut self, trb: TransferRequestBlock) -> TrbFuture<T> {
let paddr = self.physical_address_of_enqueue_pointer();
*self.next_trb_ref() = trb.with_cycle(self.cycle_bit);
self.advance_enqueue_pointer();
let future = TrbFuture::new(paddr);
self.pending_futures.push_back(future.clone());
future
}
fn next_trb_ref(&mut self) -> &mut TransferRequestBlock {
&mut self.segments[self.enqueue_pointer.segment_index][self.enqueue_pointer.segment_offset]
}
fn advance_enqueue_pointer(&mut self) {
self.enqueue_pointer.segment_offset += 1;
if self.enqueue_pointer.segment_offset
== self.segments[self.enqueue_pointer.segment_index].len() - 1
{
// We have reached the end of the segment, insert a link trb.
// Increment the segment index with wrapping.
let next_segment_index =
if self.enqueue_pointer.segment_index + 1 == self.segments.len() {
0
} else {
self.enqueue_pointer.segment_index + 1
};
let next_segment_pointer = self.segments[next_segment_index].physical_address();
let toggle_cycle = next_segment_index == 0;
*self.next_trb_ref() = TrbLink::new()
.with_ring_segment_pointer(next_segment_pointer as u64)
.with_cycle(self.cycle_bit)
.with_toggle_cycle(toggle_cycle)
.to_trb();
// Flip toggle cycle bit if necessary.
self.cycle_bit ^= toggle_cycle;
self.enqueue_pointer = TrbPointer {
segment_index: next_segment_index,
segment_offset: 0,
};
}
}
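// Worked example: with a single 100-TRB segment, commands occupy offsets
// 0..=98. When the enqueue offset reaches 99 (len - 1) a Link TRB is written
// there pointing back to offset 0 of segment 0, and because that link wraps
// to segment 0 the producer cycle bit flips, mirroring the consumer's wrap.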
pub fn handle_completion(&mut self, completion_trb: T, physical_address: usize) {
let completion = self.pending_futures.pop_front().unwrap();
let mut completion = completion.state.lock();
// TODO: Handle recovery scenarios here.
assert!(
completion.physical_address == physical_address,
"Got an unexpected command completion. Expected: {:0x}, Got: {:0x}",
completion.physical_address,
physical_address
);
completion.response = Some(completion_trb);
if let Some(waker) = &completion.waker {
waker.wake_by_ref();
}
}
}
pub struct CommandRing {
pub trb_ring: TrbRing<CommandCompletionEvent>,
doorbell: DoorbellPointer,
}
impl CommandRing {
pub fn new(doorbell: DoorbellPointer) -> Self {
Self {
trb_ring: TrbRing::new(),
doorbell,
}
}
// We have to explicitly return the future here so the caller can drop the
// ring lock before awaiting it (see send_command in driver.rs).
pub fn enqueue_command(&mut self, command: CommandTrb) -> TrbFuture<CommandCompletionEvent> {
let fut = self.trb_ring.enqueue_trb(command.into());
// The command ring doorbell is always rung with target value 0.
self.doorbell.ring(0);
fut
}
}