Rust XHCI Implementation.

Drew 2025-12-05 22:01:13 -08:00
parent 8b022a6b24
commit 7bbbf79bd6
26 changed files with 1901 additions and 209 deletions

rust/Cargo.lock (generated, 44 changed lines)

@@ -2,12 +2,6 @@
# It is not intended for manual editing.
version = 4
[[package]]
name = "autocfg"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
[[package]]
name = "bitfield-struct"
version = "0.8.0"
@@ -80,11 +74,10 @@ dependencies = [
[[package]]
name = "lock_api"
version = "0.4.12"
version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965"
dependencies = [
"autocfg",
"scopeguard",
]
@@ -105,9 +98,9 @@ dependencies = [
[[package]]
name = "prettyplease"
version = "0.2.20"
version = "0.2.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e"
checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b"
dependencies = [
"proc-macro2",
"syn",
@@ -115,18 +108,18 @@ dependencies = [
[[package]]
name = "proc-macro2"
version = "1.0.86"
version = "1.0.103"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.36"
version = "1.0.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7"
checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f"
dependencies = [
"proc-macro2",
]
@@ -148,9 +141,9 @@ dependencies = [
[[package]]
name = "syn"
version = "2.0.72"
version = "2.0.111"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af"
checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87"
dependencies = [
"proc-macro2",
"quote",
@@ -178,15 +171,15 @@ dependencies = [
[[package]]
name = "unicode-ident"
version = "1.0.12"
version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5"
[[package]]
name = "unicode-segmentation"
version = "1.11.0"
version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202"
checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493"
[[package]]
name = "victoriafalls"
@@ -200,12 +193,21 @@ dependencies = [
"yunqc",
]
[[package]]
name = "volatile"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af8ca9a5d4debca0633e697c88269395493cebf2e10db21ca2dbde37c1356452"
[[package]]
name = "voyageurs"
version = "0.1.0"
dependencies = [
"bitfield-struct 0.12.1",
"mammoth",
"pci",
"volatile",
"yellowstone-yunq",
]
[[package]]


@@ -3,7 +3,6 @@ use crate::syscall;
use crate::zion::ZError;
use alloc::slice;
use core::fmt::Debug;
use core::ops::Deref;
use core::ptr::{addr_of, addr_of_mut, read_volatile, write_volatile, NonNull};
#[cfg(feature = "hosted")]
@@ -78,6 +77,10 @@ impl MemoryRegion {
})
}
pub fn vaddr(&self) -> usize {
self.virt_addr as usize
}
pub fn slice<T>(&self) -> &[T] {
unsafe {
slice::from_raw_parts(
@@ -96,7 +99,7 @@ impl MemoryRegion {
}
}
pub fn zero_region(&self) {
pub fn zero_region(&mut self) {
for i in self.mut_slice() {
*i = 0;
}
@@ -114,8 +117,9 @@ impl MemoryRegion {
/// Creates a reference from a given offset.
///
/// SAFETY: Caller must ensure that the memory pointed to by this
/// pointer must not get mutated while the reference exists.
/// # Safety
/// - Caller must ensure that the memory pointed to by this
/// pointer must not get mutated while the reference exists.
pub unsafe fn as_ref_at_offset<T>(&self, offset: usize) -> &T {
let ptr: *const T = self.raw_ptr_at_offset(offset as u64);
assert!(ptr.is_aligned(), "");
@@ -128,9 +132,10 @@ impl MemoryRegion {
/// Creates a reference from a given offset.
///
/// SAFETY: Caller must ensure that this is the only reference to the memory pointed
/// to by this pointer.
pub unsafe fn as_mut_ref_at_offset<T>(&self, offset: usize) -> &mut T {
/// # Safety
/// - Caller must ensure that this is the only reference to the memory pointed
/// to by this pointer.
pub unsafe fn as_mut_ref_at_offset<T>(&mut self, offset: usize) -> &mut T {
let ptr: *const T = self.raw_ptr_at_offset(offset as u64);
assert!(ptr.is_aligned(), "");
// SAFETY:
@@ -246,11 +251,12 @@ pub fn map_cap_and_leak(mem_cap: Capability) -> u64 {
vaddr
}
pub fn map_direct_physical_and_leak(paddr: u64, size: u64) -> u64 {
let mem_cap = syscall::memory_object_direct_physical(paddr, size).unwrap();
pub fn map_direct_physical_and_leak<T>(paddr: usize, size: usize) -> NonNull<T> {
let mem_cap = syscall::memory_object_direct_physical(paddr as u64, size as u64).unwrap();
let vaddr = syscall::address_space_map(&mem_cap).unwrap();
mem_cap.release();
vaddr
// UNWRAP: The kernel guarantees this is valid.
NonNull::new(vaddr as *mut T).unwrap()
}
pub fn map_physical_and_leak(size: u64) -> (u64, u64) {


@@ -4,17 +4,34 @@ use core::{
ptr::NonNull,
};
use alloc::{slice, vec::Vec};
use alloc::{boxed::Box, slice, vec::Vec};
use crate::mem::MemoryRegion;
pub struct PhysicalBox<T: ?Sized> {
data: NonNull<T>,
#[allow(dead_code)]
region: MemoryRegion,
physical_address: usize,
_marker: PhantomData<T>,
}
impl<T> PhysicalBox<T> {
pub fn new(data: T) -> Self {
let (memory_region, paddr) =
MemoryRegion::contiguous_physical(size_of::<T>() as u64).expect("Failed to allocate");
// UNWRAP: We know this isn't null.
let ptr = NonNull::new(memory_region.mut_ptr_at_offset(0)).unwrap();
unsafe { ptr.write(data) };
Self {
data: ptr,
region: memory_region,
physical_address: paddr as usize,
_marker: PhantomData,
}
}
}
impl<T: ?Sized> PhysicalBox<T> {
pub fn physical_address(&self) -> usize {
self.physical_address
@@ -50,7 +67,7 @@ impl<T> PhysicalBox<[T]> {
{
let layout = core::alloc::Layout::array::<T>(len).expect("Layout overflow");
// TODO: Implement a function like alloc that takes a layout. let (memory_region, paddr) =
// TODO: Implement a function like alloc that takes a layout.
let (memory_region, paddr) =
MemoryRegion::contiguous_physical(layout.size() as u64).expect("Failed to allocate");
@@ -72,7 +89,7 @@ impl<T> PhysicalBox<[T]> {
}
}
pub fn from_vec(mut vec: Vec<T>) -> Self {
pub fn from_vec(vec: Vec<T>) -> Self {
let len = vec.len();
let layout = core::alloc::Layout::array::<T>(len).expect("Layout overflow");
@@ -100,6 +117,10 @@ impl<T> PhysicalBox<[T]> {
pub fn len(&self) -> usize {
(**self).len()
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
impl<I, T> Index<I> for PhysicalBox<[T]>
@@ -122,6 +143,13 @@ where
}
}
/// SAFETY: We are the only owner of this pointer.
unsafe impl<T: ?Sized> Send for PhysicalBox<T> where Box<T>: Send {}
/// SAFETY: You must have a mutable reference to this
/// type to modify the data at the pointer.
unsafe impl<T: ?Sized> Sync for PhysicalBox<T> where Box<T>: Sync {}
impl<T: ?Sized> Drop for PhysicalBox<T> {
fn drop(&mut self) {
// SAFETY:

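PhysicalBox is this commit's primitive for DMA-visible data: the payload lives in physically contiguous memory, software reaches it through the box, and hardware gets the physical address. A small usage sketch (the payload type is arbitrary):

// Place a zeroed 64-byte block in physically contiguous memory.
let block = PhysicalBox::new([0u8; 64]);
// Hand the physical side to the device; keep using the virtual side here.
let paddr = block.physical_address();

The Send/Sync impls above mirror Box<T>, so a PhysicalBox may cross threads exactly when a Box of the same payload could.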

@@ -122,6 +122,7 @@ impl Executor {
}
}
#[derive(Clone)]
pub struct Spawner {
tasks: Arc<Mutex<BTreeMap<TaskId, Task>>>,
task_queue: Arc<Mutex<VecDeque<TaskId>>>,


@@ -71,15 +71,15 @@ impl PciDevice {
control.capable_address_64(),
"We don't handle the non-64bit case for MSI yet."
);
assert!(
control.multi_message_capable() == 0,
"We don't yet handle multi-message capable devices."
);
if control.multi_message_capable() != 0 {
mammoth::debug!("WARN: We don't yet handle multi-message capable devices.");
}
// FIXME: These probably need to be volatile writes.
let header: &mut PciDeviceHeader = self.memory_region.as_mut();
header.command = header.command.with_interrupt_disable(true);
msi_cap.msi_control = control.with_msi_enable(true);
msi_cap.msi_control = control.with_msi_enable(true).with_multi_message_enable(0);
// For setting addr and data field, see intel ref
// Vol 3. Section 11.11

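For reference, the SDM encoding that code goes on to fill in looks roughly like this (a sketch, not the commit's code; lapic_id and vector are placeholders):

// Message address: 0xFEE in bits 31:20 selects interrupt delivery;
// bits 19:12 carry the destination LAPIC ID.
let msi_address: u64 = 0xFEE0_0000 | ((lapic_id as u64) << 12);
// Message data: the vector in bits 7:0; zeros elsewhere select
// fixed delivery, edge-triggered.
let msi_data: u32 = vector as u32;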

@@ -1,3 +1,5 @@
use core::ffi::c_void;
use alloc::sync::Arc;
use mammoth::{
cap::Capability,
@@ -26,8 +28,11 @@ impl AhciController {
pub fn new(pci_memory: Capability) -> Self {
let pci_device = PciDevice::from_cap(pci_memory).unwrap();
let hba_vaddr =
mem::map_direct_physical_and_leak(pci_device.header().bars[5] as u64, 0x1100);
let hba_vaddr = mem::map_direct_physical_and_leak::<c_void>(
pci_device.header().bars[5] as usize,
0x1100,
)
.as_ptr() as u64;
let hba = unsafe { (hba_vaddr as *mut AhciHba).as_mut().unwrap() };
let mut controller = Self {
pci_device: Mutex::new(pci_device),


@ -6,3 +6,10 @@ edition = "2024"
[dependencies]
bitfield-struct = "0.12"
mammoth = { path = "../../lib/mammoth/" }
pci = { path = "../../lib/pci" }
volatile = "0.6.1"
yellowstone-yunq = { version = "0.1.0", path = "../../lib/yellowstone" }
[features]
default = ["debug"]
debug = []


@@ -5,12 +5,47 @@ extern crate alloc;
mod xhci;
use mammoth::{debug, define_entry, zion::z_err_t};
use alloc::sync::Arc;
use mammoth::{
cap::Capability,
debug, define_entry,
sync::Mutex,
task::{Executor, Task},
zion::z_err_t,
};
use pci::PciDevice;
use xhci::driver::XHCIDriver;
define_entry!();
#[unsafe(no_mangle)]
extern "C" fn main() -> z_err_t {
debug!("In Voyageurs");
#[cfg(feature = "debug")]
debug!("Voyageurs Starting.");
let yellowstone = yellowstone_yunq::from_init_endpoint();
let xhci_info = yellowstone
.get_xhci_info()
.expect("Failed to get XHCI info from yellowstone.");
let pci_device = PciDevice::from_cap(Capability::take(xhci_info.xhci_region)).unwrap();
let xhci_driver = Arc::new(XHCIDriver::from_pci_device(pci_device));
let executor = Arc::new(Mutex::new(Executor::new()));
let driver_clone = xhci_driver.clone();
let spawner = executor.lock().new_spawner();
let interrupt_thread = mammoth::thread::spawn(move || driver_clone.interrupt_loop(spawner));
executor
.lock()
.spawn(Task::new(async move { xhci_driver.startup().await }));
executor.lock().run();
interrupt_thread.join().unwrap();
0
}


@@ -0,0 +1,71 @@
use bitfield_struct::bitfield;
use crate::xhci::data_structures::{TransferRequestBlock, TrbType};
pub struct EnableSlotTrb {}
#[bitfield(u128)]
pub struct EnableSlotCommand {
__: u64,
__: u32,
#[bits(10)]
__: u16,
#[bits(6, default=TrbType::EnableSlotCommand)]
trb_type: TrbType,
#[bits(5)]
slot_type: u8,
#[bits(11)]
__: u16,
}
impl From<EnableSlotCommand> for CommandTrb {
fn from(value: EnableSlotCommand) -> Self {
Self(value.into_bits().into())
}
}
#[bitfield(u128)]
pub struct AddressDeviceCommand {
pub input_context_pointer: u64,
__: u32,
#[bits(9)]
__: u16,
pub block_set_address_request: bool,
#[bits(6, default=TrbType::AddressDeviceCommand)]
trb_type: TrbType,
__: u8,
pub slot_id: u8,
}
impl From<AddressDeviceCommand> for CommandTrb {
fn from(value: AddressDeviceCommand) -> Self {
Self(value.into_bits().into())
}
}
#[bitfield(u128)]
pub struct NoOpCommand {
__: u64,
__: u32,
cycle: bool,
#[bits(9)]
__: u16,
#[bits(6, default = TrbType::NoOpCommand)]
trb_type: TrbType,
__: u16,
}
impl From<NoOpCommand> for CommandTrb {
fn from(value: NoOpCommand) -> Self {
Self(value.into_bits().into())
}
}
/// Simple type to ensure we are only sending commands to command rings.
pub struct CommandTrb(TransferRequestBlock);
impl From<CommandTrb> for TransferRequestBlock {
fn from(value: CommandTrb) -> Self {
value.0
}
}
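Since the only conversions into CommandTrb come from these command builders, an enqueue path that accepts CommandTrb rejects transfer and event TRBs at compile time. Typical construction (slot_id and input_ctx_paddr are placeholders; the builders are generated by bitfield-struct):

let no_op: CommandTrb = NoOpCommand::new().into();
let address_device: CommandTrb = AddressDeviceCommand::new()
    .with_slot_id(slot_id)
    .with_input_context_pointer(input_ctx_paddr)
    .into();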


@@ -0,0 +1,155 @@
use bitfield_struct::{bitenum, bitfield};
use crate::xhci::data_structures::{TransferRequestBlock, TrbType};
#[bitenum]
#[repr(u8)]
#[derive(Debug, Eq, PartialEq)]
pub enum CommandCompletionCode {
#[fallback]
#[allow(dead_code)]
Invalid = 0,
Success = 1,
DataBufferError = 2,
BabbleDetectedError = 3,
UsbTransactionError = 4,
TrbError = 5,
StallError = 6,
ResourceError = 7,
BandwidthError = 8,
NoSlotsAvailable = 9,
InvalidStreamType = 10,
SlotNotEnabled = 11,
EndpointNotEnabled = 12,
ShortPacket = 13,
RingUnderrun = 14,
RingOverrun = 15,
VFEventRingFull = 16,
ParameterError = 17,
}
#[bitfield(u128)]
pub struct TransferEvent {
pub transfer_trb_pointer: u64,
#[bits(24)]
pub trb_transfer_length: u32,
/// Completion Code. This field encodes the completion status of the command that generated the
/// event. Refer to the respective command definition for a list of the possible Completion Codes
/// associated with the command. Refer to section 6.4.5 for an enumerated list of possible error
/// conditions.
#[bits(8)]
pub completion_code: CommandCompletionCode,
#[bits(10)]
__: u16,
/// TRB Type. This field identifies the type of the TRB. Refer to Table 6-91 for the definition of the
/// Command Completion Event TRB type ID
#[bits(6, default=TrbType::TransferEvent)]
pub trb_type: TrbType,
#[bits(5)]
pub endpoint_id: u8,
#[bits(3)]
__: u8,
pub slot_id: u8,
}
#[bitfield(u128)]
pub struct CommandCompletionEvent {
/// Command TRB Pointer Hi and Lo. This field represents the high order bits of the 64-bit address
/// of the Command TRB that generated this event. Note that this field is not valid for some
/// Completion Code values. Refer to Table 6-90 for specific cases.
///
/// The memory structure referenced by this physical memory pointer shall be aligned on a 16-byte
/// address boundary.
pub command_trb_pointer: u64,
/// Command Completion Parameter. This field may optionally be set by a command. Refer to
/// section 4.6.6.1 for specific usage. If a command does not utilize this field it shall be treated as
/// RsvdZ.
#[bits(24)]
pub command_completion_parameter: u64,
/// Completion Code. This field encodes the completion status of the command that generated the
/// event. Refer to the respective command definition for a list of the possible Completion Codes
/// associated with the command. Refer to section 6.4.5 for an enumerated list of possible error
/// conditions.
#[bits(8)]
pub completion_code: CommandCompletionCode,
/// Cycle bit (C). This bit is used to mark the Dequeue Pointer of an Event Ring
pub cycle_bit: bool,
#[bits(9)]
__: u16,
/// TRB Type. This field identifies the type of the TRB. Refer to Table 6-91 for the definition of the
/// Command Completion Event TRB type ID
#[bits(6, default=TrbType::CommandCompletionEvent)]
pub trb_type: TrbType,
/// VF ID. The ID of the Virtual Function that generated the event. Note that this field is valid only if
/// Virtual Functions are enabled. If they are not enabled this field shall be cleared to 0.
pub vf_id: u8,
/// Slot ID. The Slot ID field shall be updated by the xHC to reflect the slot associated with the
/// command that generated the event, with the following exceptions:
///
/// - The Slot ID shall be cleared to 0 for No Op, Set Latency Tolerance Value, Get Port Bandwidth,
/// and Force Event Commands.
///
/// - The Slot ID shall be set to the ID of the newly allocated Device Slot for the Enable Slot
/// Command.
///
/// - The value of Slot ID shall be vendor defined when generated by a vendor defined command.
///
/// This value is used as an index in the Device Context Base Address Array to select the Device
/// Context of the source device. If this Event is due to a Host Controller Command, then this field
/// shall be cleared to 0.
pub slot_id: u8,
}
#[bitfield(u128)]
pub struct PortStatusChangeEvent {
#[bits(24)]
__: u32,
pub port_id: u8,
__: u32,
#[bits(24)]
__: u32,
#[bits(8)]
pub completion_code: CommandCompletionCode,
#[bits(10)]
__: u16,
#[bits(6, default=TrbType::PortStatusChangeEvent)]
trb_type: TrbType,
__: u16,
}
pub enum EventTrb {
Transfer(TransferEvent),
CommandCompletion(CommandCompletionEvent),
PortStatusChange(PortStatusChangeEvent),
BandwidthRequest(TransferRequestBlock),
Doorbell(TransferRequestBlock),
HostController(TransferRequestBlock),
DeviceNotification(TransferRequestBlock),
MFINDEXWrap(TransferRequestBlock),
}
impl From<TransferRequestBlock> for EventTrb {
fn from(value: TransferRequestBlock) -> Self {
match value.trb_type() {
TrbType::TransferEvent => {
EventTrb::Transfer(TransferEvent::from_bits(value.into_bits()))
}
TrbType::CommandCompletionEvent => {
EventTrb::CommandCompletion(CommandCompletionEvent::from_bits(value.into_bits()))
}
TrbType::PortStatusChangeEvent => {
EventTrb::PortStatusChange(PortStatusChangeEvent::from_bits(value.into_bits()))
}
TrbType::BandwidthRequestEvent => EventTrb::BandwidthRequest(value),
TrbType::DoorbellEvent => EventTrb::Doorbell(value),
TrbType::HostControllerEvent => EventTrb::HostController(value),
TrbType::DeviceNotificationEvent => EventTrb::DeviceNotification(value),
TrbType::MFINDEXWrapEvent => EventTrb::MFINDEXWrap(value),
t => panic!("Unknown trb type on event ring: {:?}", t),
}
}
}


@@ -4,9 +4,9 @@ use crate::xhci::data_structures::{EndpointContext, SlotContext};
#[bitfield(u32)]
pub struct InputControlContextSettings {
configuration_value: u8,
interface_number: u8,
alternate_setting: u8,
pub configuration_value: u8,
pub interface_number: u8,
pub alternate_setting: u8,
__: u8,
}
@@ -16,7 +16,7 @@ pub struct InputControlContext {
pub drop_context_flags: u32,
pub add_context_flags: u32,
__: [u32; 5],
settings: InputControlContextSettings,
pub settings: InputControlContextSettings,
}
const _: () = assert!(size_of::<InputControlContext>() == 0x20);
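The add/drop flags are bitmaps indexed by Device Context Index: bit 0 selects the Slot Context, bit 1 selects Endpoint 0, and bit N selects DCI N. Given some ctx: &mut InputControlContext, the Address Device setup later in this commit amounts to:

// Enable the Slot Context (A0) and Endpoint 0 Context (A1); drop nothing.
ctx.add_context_flags = 0b11;
ctx.drop_context_flags = 0;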


@@ -1,15 +1,21 @@
mod command_trb;
mod device_context;
mod endpoint_context;
mod event_ring_segment_table;
mod event_trb;
mod input_context;
mod slot_context;
mod transfer_trb;
mod trb;
mod trb_ring_segment;
pub use command_trb::*;
pub use device_context::*;
pub use endpoint_context::*;
pub use event_ring_segment_table::*;
pub use event_trb::*;
pub use input_context::*;
pub use slot_context::*;
pub use transfer_trb::*;
pub use trb::*;
pub use trb_ring_segment::*;


@@ -0,0 +1,304 @@
use bitfield_struct::{bitenum, bitfield};
use crate::xhci::data_structures::{TransferRequestBlock, TrbType};
#[bitfield(u128)]
pub struct NormalTransfer {
/// Data Buffer Pointer Hi and Lo. These fields represent the 64-bit address of the TRB data area
/// for this transaction or 8 bytes of immediate data. The Immediate Data (IDT) control flag selects
/// this option for each Normal TRB.
///
/// The memory structure referenced by this physical memory pointer is allowed to begin on a byte
/// address boundary. However, user may find other alignments, such as 64-byte or 128-byte
/// alignments, to be more efficient and provide better performance
data_buffer_pointer: u64,
/// TRB Transfer Length. For an OUT, this field defines the number of data bytes the xHC shall
/// send during the execution of this TRB. If the value of this field is 0 when the xHC fetches this
/// TRB, the xHC shall execute a zero-length transaction.
///
/// Note: If a zero-length transfer is specified, the Data Buffer Pointer field is ignored by the xHC,
/// irrespective of the state of the IDT flag. Refer to section 4.9.1 for more information on zero-
/// length Transfer TRB handling.
///
/// For an IN, the value of the field identifies the size of the data buffer referenced by the Data
/// Buffer Pointer, i.e. the number of bytes the host expects the endpoint to deliver.
///
/// Valid values are 0 to 64K.
#[bits(17)]
trb_transfer_length: u32,
/// TD Size. This field provides an indicator of the number of packets remaining in the TD. Refer to
/// section 4.10.2.4 for how this value is calculated
#[bits(5)]
td_size: u8,
/// Interrupter Target. This field defines the index of the Interrupter that will receive events
/// generated by this TRB. Valid values are between 0 and MaxIntrs-1.
#[bits(10)]
interrupter_target: u16,
/// Cycle bit (C). This bit is used to mark the Enqueue Pointer of the Transfer ring.
cycle: bool,
/// Evaluate Next TRB (ENT). If this flag is 1 the xHC shall fetch and evaluate the next TRB before
/// saving the endpoint state. Refer to section 4.12.3 for more information
evaluate_next: bool,
/// Interrupt-on Short Packet (ISP). If this flag is 1 and a Short Packet is encountered for this TRB
/// (i.e., less than the amount specified in TRB Transfer Length), then a Transfer Event TRB shall be
/// generated with its Completion Code set to Short Packet. The TRB Transfer Length field in the
/// Transfer Event TRB shall reflect the residual number of bytes not transferred into the associated
/// data buffer. In either case, when a Short Packet is encountered, the TRB shall be retired without
/// error and the xHC shall advance to the next Transfer Descriptor (TD).
///
/// Note that if the ISP and IOC flags are both 1 and a Short Packet is detected, then only one
/// Transfer Event TRB shall be queued to the Event Ring. Also refer to section 4.10.1.1
interrupt_on_short_packet: bool,
/// No Snoop (NS). When set to 1, the xHC is permitted to set the No Snoop bit in the Requester
/// Attributes of the PCIe transactions it initiates if the PCIe configuration Enable No Snoop flag is
/// also set. When cleared to 0, the xHC is not permitted to set PCIe packet No Snoop Requester
/// Attribute. Refer to section 4.18.1 for more information.
///
/// NOTE: If software sets this bit, then it is responsible for maintaining cache consistency
no_snoop: bool,
/// Chain bit (CH). Set to 1 by software to associate this TRB with the next TRB on the Ring. A
/// Transfer Descriptor (TD) is defined as one or more TRBs. The Chain bit is used to identify the
/// TRBs that comprise a TD. The Chain bit is always 0 in the last TRB of a TD
chain: bool,
/// Interrupt On Completion (IOC). If this bit is set to 1, it specifies that when this TRB completes,
/// the Host Controller shall notify the system of the completion by placing an Transfer Event TRB
/// on the Event ring and asserting an interrupt to the host at the next interrupt threshold. Note that
/// the interrupt assertion may be blocked for the Transfer Event by BEI. Refer to sections 4.10.4
/// and 4.17.5.
interrupt_on_completion: bool,
/// Immediate Data (IDT). If this bit is set to 1, it specifies that the Data Buffer Pointer field of this
/// TRB contains data, not a pointer, and the Length field shall contain a value between 0 and 8 to
/// indicate the number of valid bytes from offset 0 in the TRB that should be used as data.
///
/// Note: If the IDT flag is set in one Transfer TRB of a TD, then it shall be the only Transfer TRB of
/// the TD. An Event Data TRB may be included in the TD. Failure to follow this rule may result in
/// undefined xHC operation.
///
/// Note: IDT shall not be set (1) for TRBs on endpoints that define a Max Packet Size < 8 bytes, or
/// on IN endpoints.
immediate_data: bool,
__: bool,
__: bool,
/// Block Event Interrupt (BEI). If this bit is set to 1 and IOC = 1, then the Transfer Event
/// generated by IOC shall not assert an interrupt to the host at the next interrupt threshold. Refer
/// to section 4.17.5.
block_event_interrupt: bool,
/// TRB Type. This shall be set to Normal TRB type. Refer to Table 6-91 for the definition of the
/// valid Transfer TRB type IDs
#[bits(6, default=TrbType::Normal)]
trb_type: TrbType,
__: u16,
}
impl From<NormalTransfer> for TransferTrb {
fn from(value: NormalTransfer) -> Self {
TransferTrb(TransferRequestBlock::from_bits(value.into_bits()))
}
}
#[bitenum]
#[repr(u8)]
#[derive(Debug)]
pub enum TransferType {
NoDataStage = 0,
#[fallback]
Reserved = 1,
OutDataStage = 2,
InDataStage = 3,
}
#[bitfield(u128)]
pub struct SetupStage {
/// These 5 fields are specified in Table 9-3. Format of Setup Data.
pub request_type: u8,
pub request: u8,
pub value: u16,
pub index: u16,
pub length: u16,
/// TRB Transfer Length. Always 8.
#[bits(17, default=8, access=RO)]
trb_transfer_length: u32,
#[bits(5)]
__: u8,
/// Interrupter Target. This field defines the index of the Interrupter that will receive events
/// generated by this TRB. Valid values are between 0 and MaxIntrs-1.
#[bits(10)]
pub interrupter_target: u16,
/// Cycle bit (C). This bit is used to mark the Enqueue point of a Transfer ring.
cycle: bool,
#[bits(4)]
__: u8,
/// Interrupt On Completion (IOC). If this bit is set to 1, it specifies that when this TRB completes,
/// the Host Controller shall notify the system of the completion by placing an Event TRB on the
/// Event ring and sending an interrupt at the next interrupt threshold. Refer to section 4.10.4
#[bits(default=false, access=RO)]
interrupt_on_completion: bool,
/// Immediate Data (IDT). This bit shall be set to 1 in a Setup Stage TRB. It specifies that the
/// Parameter component of this TRB contains Setup Data.
#[bits(default=true, access=RO)]
immediate_data: bool,
#[bits(3)]
__: u8,
/// TRB Type. This shall be set to Normal TRB type. Refer to Table 6-91 for the definition of the
/// valid Transfer TRB type IDs
#[bits(6, default=TrbType::SetupStage, access=RO)]
trb_type: TrbType,
#[bits(2)]
pub transfer_type: TransferType,
#[bits(14)]
__: u16,
}
impl From<SetupStage> for TransferTrb {
fn from(value: SetupStage) -> Self {
TransferTrb(TransferRequestBlock::from_bits(value.into_bits()))
}
}
#[bitfield(u128)]
pub struct DataStage {
/// Data Buffer Pointer Hi and Lo. These fields represent the 64-bit address of the TRB data area
/// for this transaction.
///
/// The memory structure referenced by this physical memory pointer is allowed to begin on a byte
/// address boundary. However, user may find other alignments, such as 64-byte or 128-byte
/// alignments, to be more efficient and provide better performance
pub data_buffer_pointer: u64,
/// TRB Transfer Length. For an OUT, this field is the number of data bytes the xHC will send
/// during the execution of this TRB.
///
/// For an IN, the initial value of the field identifies the size of the data buffer referenced by the Data
/// Buffer Pointer, i.e. the number of bytes the host expects the endpoint to deliver.
///
/// Valid values are 1 to 64K.
#[bits(17)]
pub trb_transfer_length: u32,
/// TD Size. This field provides an indicator of the number of packets remaining in the TD. Refer to
/// section 4.10.2.4 for how this value is calculated
#[bits(5)]
pub td_size: u8,
/// Interrupter Target. This field defines the index of the Interrupter that will receive events
/// generated by this TRB. Valid values are between 0 and MaxIntrs-1.
#[bits(10)]
pub interrupter_target: u16,
/// Cycle bit (C). This bit is used to mark the Enqueue Pointer of the Transfer ring.
cycle: bool,
/// Evaluate Next TRB (ENT). If this flag is 1 the xHC shall fetch and evaluate the next TRB before
/// saving the endpoint state. Refer to section 4.12.3 for more information
pub evaluate_next: bool,
/// Interrupt-on Short Packet (ISP). If this flag is 1 and a Short Packet is encountered for this TRB
/// (i.e., less than the amount specified in TRB Transfer Length), then a Transfer Event TRB shall be
/// generated with its Completion Code set to Short Packet. The TRB Transfer Length field in the
/// Transfer Event TRB shall reflect the residual number of bytes not transferred into the associated
/// data buffer. In either case, when a Short Packet is encountered, the TRB shall be retired without
/// error and the xHC shall advance to the next Transfer Descriptor (TD).
///
/// Note that if the ISP and IOC flags are both 1 and a Short Packet is detected, then only one
/// Transfer Event TRB shall be queued to the Event Ring. Also refer to section 4.10.1.1
interrupt_on_short_packet: bool,
/// No Snoop (NS). When set to 1, the xHC is permitted to set the No Snoop bit in the Requester
/// Attributes of the PCIe transactions it initiates if the PCIe configuration Enable No Snoop flag is
/// also set. When cleared to 0, the xHC is not permitted to set PCIe packet No Snoop Requester
/// Attribute. Refer to section 4.18.1 for more information.
///
/// NOTE: If software sets this bit, then it is responsible for maintaining cache consistency
no_snoop: bool,
/// Chain bit (CH). Set to 1 by software to associate this TRB with the next TRB on the Ring. A
/// Transfer Descriptor (TD) is defined as one or more TRBs. The Chain bit is used to identify the
/// TRBs that comprise a TD. The Chain bit is always 0 in the last TRB of a TD
chain: bool,
/// Interrupt On Completion (IOC). If this bit is set to 1, it specifies that when this TRB completes,
/// the Host Controller shall notify the system of the completion by placing an Transfer Event TRB
/// on the Event ring and asserting an interrupt to the host at the next interrupt threshold. Note that
/// the interrupt assertion may be blocked for the Transfer Event by BEI. Refer to sections 4.10.4
/// and 4.17.5.
#[bits(default=false, access=RO)]
interrupt_on_completion: bool,
/// Immediate Data (IDT). If this bit is set to 1, it specifies that the Data Buffer Pointer field of this
/// TRB contains data, not a pointer. If IDT = 1, the Length field shall contain a value between 1 and
/// 8 to indicate the number of valid bytes from offset 0 in the TRB that should be used as data.
///
/// Note: If the IDT flag is set in one Data Stage TRB of a TD, then it shall be the only Transfer TRB of
/// the TD. An Event Data TRB may also be included in the TD. Failure to follow this rule may result
/// in undefined xHC operation.
pub immediate_data: bool,
__: bool,
__: bool,
/// Block Event Interrupt (BEI). If this bit is set to 1 and IOC = 1, then the Transfer Event
/// generated by IOC shall not assert an interrupt to the host at the next interrupt threshold. Refer
/// to section 4.17.5.
block_event_interrupt: bool,
/// TRB Type. This shall be set to Normal TRB type. Refer to Table 6-91 for the definition of the
/// valid Transfer TRB type IDs
#[bits(6, default=TrbType::DataStage, access=RO)]
trb_type: TrbType,
/// Direction (DIR). This bit indicates the direction of the data transfer as defined in the Data State
/// TRB Direction column of Table 7. If cleared to 0, the data stage transfer direction is OUT (Write
/// Data). If set to 1, the data stage transfer direction is IN (Read Data). Refer to section 4.11.2.2 for
/// more information on the use of DIR.
pub direction: bool,
#[bits(15)]
__: u16,
}
impl From<DataStage> for TransferTrb {
fn from(value: DataStage) -> Self {
TransferTrb(TransferRequestBlock::from_bits(value.into_bits()))
}
}
#[bitfield(u128)]
pub struct StatusStage {
__: u64,
#[bits(22)]
__: u32,
/// Interrupter Target. This field defines the index of the Interrupter that will receive events
/// generated by this TRB. Valid values are between 0 and MaxIntrs-1.
#[bits(10)]
pub interrupter_target: u16,
/// Cycle bit (C). This bit is used to mark the Enqueue Pointer of the Transfer ring.
cycle: bool,
/// Evaluate Next TRB (ENT). If this flag is 1 the xHC shall fetch and evaluate the next TRB before
/// saving the endpoint state. Refer to section 4.12.3 for more information
evaluate_next: bool,
#[bits(2)]
__: u8,
/// Chain bit (CH). Set to 1 by software to associate this TRB with the next TRB on the Ring. A
/// Transfer Descriptor (TD) is defined as one or more TRBs. The Chain bit is used to identify the
/// TRBs that comprise a TD. The Chain bit is always 0 in the last TRB of a TD
chain: bool,
/// Interrupt On Completion (IOC). If this bit is set to 1, it specifies that when this TRB completes,
/// the Host Controller shall notify the system of the completion by placing an Transfer Event TRB
/// on the Event ring and asserting an interrupt to the host at the next interrupt threshold. Note that
/// the interrupt assertion may be blocked for the Transfer Event by BEI. Refer to sections 4.10.4
/// and 4.17.5.
pub interrupt_on_completion: bool,
#[bits(4)]
__: u8,
/// TRB Type. This shall be set to Normal TRB type. Refer to Table 6-91 for the definition of the
/// valid Transfer TRB type IDs
#[bits(6, default=TrbType::StatusStage, access=RO)]
trb_type: TrbType,
/// Direction (DIR). This bit indicates the direction of the data transfer as defined in the Status State
/// TRB Direction column of Table 7. If cleared to 0, the status stage transfer direction is OUT
/// (Host-to-device). If set to 1, the status stage transfer direction is IN (Device-to-host). Refer to
/// section 4.11.2.2 for more information on the use of DIR.
pub direction: bool,
#[bits(15)]
__: u16,
}
impl From<StatusStage> for TransferTrb {
fn from(value: StatusStage) -> Self {
TransferTrb(TransferRequestBlock::from_bits(value.into_bits()))
}
}
#[derive(Clone)]
pub struct TransferTrb(TransferRequestBlock);
impl From<TransferTrb> for TransferRequestBlock {
fn from(value: TransferTrb) -> Self {
value.0
}
}
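These stage TRBs compose into a control-transfer TD: a SetupStage, an optional DataStage, and a StatusStage whose direction is the opposite of the data stage. A sketch of a GET_DESCRIPTOR(Device) TD, with buffer_paddr a placeholder physical address (the full version appears in the device-slot code below):

let setup = SetupStage::new()
    .with_request_type(0x80) // device-to-host, standard, device recipient
    .with_request(6)         // GET_DESCRIPTOR
    .with_value(0x1 << 8)    // descriptor type: DEVICE
    .with_length(18)         // a device descriptor is 18 bytes
    .with_transfer_type(TransferType::InDataStage);
let data = DataStage::new()
    .with_data_buffer_pointer(buffer_paddr)
    .with_trb_transfer_length(18)
    .with_direction(true); // IN
let status = StatusStage::new()
    .with_interrupt_on_completion(true)
    .with_direction(false); // OUT, opposite of the data stage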


@@ -75,29 +75,6 @@
}
}
#[bitfield(u128)]
pub struct TrbNoOp {
__: u64,
#[bits(22)]
__: u32,
#[bits(10, default = 0)]
interrupter_target: u16,
cycle: bool,
evaluate_next: bool,
__: bool,
__: bool,
chain: bool,
#[bits(default = true)]
interrupt_on_completion: bool,
#[bits(4)]
__: u8,
#[bits(6, default = TrbType::NoOpCommand)]
trb_type: TrbType,
__: u16,
}
impl TypedTrb for TrbNoOp {}
#[bitfield(u128)]
pub struct TrbLink {
/// Ring Segment Pointer Hi and Lo. These fields represent the high order bits of the 64-bit base
@@ -141,139 +118,3 @@
}
impl TypedTrb for TrbLink {}
#[bitfield(u128)]
pub struct TrbTransferEvent {
pub transfer_trb_pointer: u64,
#[bits(24)]
pub trb_transfer_lenght: u32,
/// Completion Code. This field encodes the completion status of the command that generated the
/// event. Refer to the respective command definition for a list of the possible Completion Codes
/// associated with the command. Refer to section 6.4.5 for an enumerated list of possible error
/// conditions.
pub completion_code: u8,
#[bits(10)]
__: u16,
/// TRB Type. This field identifies the type of the TRB. Refer to Table 6-91 for the definition of the
/// Command Completion Event TRB type ID
#[bits(6, default=TrbType::TransferEvent)]
pub trb_type: TrbType,
#[bits(5)]
pub endpoint_id: u8,
#[bits(3)]
__: u8,
pub slot_id: u8,
}
impl TypedTrb for TrbTransferEvent {}
#[bitenum]
#[repr(u8)]
pub enum CommandCompletionCode {
#[fallback]
#[allow(dead_code)]
Invalid = 0,
Success = 1,
}
#[bitfield(u128)]
pub struct TrbCommandCompletion {
/// Command TRB Pointer Hi and Lo. This field represents the high order bits of the 64-bit address
/// of the Command TRB that generated this event. Note that this field is not valid for some
/// Completion Code values. Refer to Table 6-90 for specific cases.
///
/// The memory structure referenced by this physical memory pointer shall be aligned on a 16-byte
/// address boundary.
pub command_trb_pointer: u64,
/// Command Completion Parameter. This field may optionally be set by a command. Refer to
/// section 4.6.6.1 for specific usage. If a command does not utilize this field it shall be treated as
/// RsvdZ.
#[bits(24)]
pub command_completion_parameter: u64,
/// Completion Code. This field encodes the completion status of the command that generated the
/// event. Refer to the respective command definition for a list of the possible Completion Codes
/// associated with the command. Refer to section 6.4.5 for an enumerated list of possible error
/// conditions.
pub completion_code: u8,
/// Cycle bit (C). This bit is used to mark the Dequeue Pointer of an Event Ring
pub cycle_bit: bool,
#[bits(9)]
__: u16,
/// TRB Type. This field identifies the type of the TRB. Refer to Table 6-91 for the definition of the
/// Command Completion Event TRB type ID
#[bits(6, default=TrbType::CommandCompletionEvent)]
pub trb_type: TrbType,
/// VF ID. The ID of the Virtual Function that generated the event. Note that this field is valid only if
/// Virtual Functions are enabled. If they are not enabled this field shall be cleared to 0.
pub vf_id: u8,
/// Slot ID. The Slot ID field shall be updated by the xHC to reflect the slot associated with the
/// command that generated the event, with the following exceptions:
///
/// - The Slot ID shall be cleared to 0 for No Op, Set Latency Tolerance Value, Get Port Bandwidth,
/// and Force Event Commands.
///
/// - The Slot ID shall be set to the ID of the newly allocated Device Slot for the Enable Slot
/// Command.
///
/// - The value of Slot ID shall be vendor defined when generated by a vendor defined command.
///
/// This value is used as an index in the Device Context Base Address Array to select the Device
/// Context of the source device. If this Event is due to a Host Controller Command, then this field
/// shall be cleared to 0.
pub slot_id: u8,
}
impl TypedTrb for TrbCommandCompletion {}
#[bitfield(u128)]
pub struct TrbPortStatusChangeEvent {
#[bits(24)]
__: u32,
pub port_id: u8,
__: u32,
#[bits(24)]
__: u32,
pub completion_code: u8,
#[bits(10)]
__: u16,
#[bits(6, default=TrbType::PortStatusChangeEvent)]
trb_type: TrbType,
__: u16,
}
impl TypedTrb for TrbPortStatusChangeEvent {}
#[bitfield(u128)]
pub struct TrbEnableSlotCommand {
__: u64,
__: u32,
#[bits(10)]
__: u16,
#[bits(6, default=TrbType::EnableSlotCommand)]
trb_type: TrbType,
#[bits(5)]
slot_type: u8,
#[bits(11)]
__: u16,
}
impl TypedTrb for TrbEnableSlotCommand {}
#[bitfield(u128)]
pub struct TrbAddressDeviceCommand {
pub input_context_pointer: u64,
__: u32,
#[bits(9)]
__: u16,
pub block_set_address_request: bool,
#[bits(6, default=TrbType::AddressDeviceCommand)]
trb_typ: TrbType,
__: u8,
pub slot_id: u8,
}
impl TypedTrb for TrbAddressDeviceCommand {}


@@ -0,0 +1,240 @@
use alloc::boxed::Box;
use mammoth::{mem::MemoryRegion, physical_box::PhysicalBox};
use crate::xhci::{
data_structures::{
DataStage, DeviceContext, EndpointContextFields, EndpointState, EndpointType, InputContext,
InputControlContextSettings, SetupStage, StatusStage, TRDequeuePointer, TransferEvent,
TransferTrb, TransferType,
},
registers::DoorbellPointer,
trb_ring::{TrbFuture, TrbRing},
usb::{Configuration, DeviceDescriptor},
};
struct DeviceContextBaseArray(PhysicalBox<[u64]>);
impl DeviceContextBaseArray {
pub fn new(max_slots: u8) -> Self {
Self(PhysicalBox::default_with_count(0, max_slots as usize + 1))
}
}
pub struct DeviceSlot {
device_context: PhysicalBox<DeviceContext>,
input_context: PhysicalBox<InputContext>,
endpoint_0_transfer_ring: TrbRing<TransferEvent>,
doorbell: DoorbellPointer,
}
impl DeviceSlot {
fn new(doorbell: DoorbellPointer) -> Self {
Self {
device_context: PhysicalBox::new(DeviceContext::default()),
input_context: PhysicalBox::new(InputContext::default()),
endpoint_0_transfer_ring: TrbRing::new(),
doorbell,
}
}
pub fn send_control_command(&mut self, trbs: &[TransferTrb]) -> TrbFuture<TransferEvent> {
let td_len = trbs.len();
for trb in trbs.iter().take(td_len - 1) {
self.endpoint_0_transfer_ring
.enqueue_trb(trb.clone().into());
}
let fut = self
.endpoint_0_transfer_ring
.enqueue_trb_expect_interrupt(trbs[td_len - 1].clone().into());
self.doorbell.ring(1);
fut
}
pub fn read_device_descriptor(
&mut self,
) -> (TrbFuture<TransferEvent>, PhysicalBox<DeviceDescriptor>) {
let setup_stage = SetupStage::new()
.with_request_type(0b10000000)
.with_request(6)
.with_value(0x1 << 8)
.with_length(size_of::<DeviceDescriptor>() as u16)
.with_index(0)
.with_transfer_type(TransferType::InDataStage);
let descriptor = PhysicalBox::new(DeviceDescriptor::default());
let data_stage = DataStage::new()
.with_data_buffer_pointer(descriptor.physical_address() as u64)
.with_trb_transfer_length(size_of::<DeviceDescriptor>() as u32)
.with_direction(true);
let status_stage = StatusStage::new()
.with_interrupter_target(0)
.with_interrupt_on_completion(true)
.with_direction(false);
let fut = self.send_control_command(&[
setup_stage.into(),
data_stage.into(),
status_stage.into(),
]);
(fut, descriptor)
}
pub fn read_configuration_descriptor(
&mut self,
config_index: u8,
) -> (TrbFuture<TransferEvent>, MemoryRegion) {
// TODO: How should we size this?
const REGION_SIZE: u16 = 0x1000;
let (region, paddr) = MemoryRegion::contiguous_physical(REGION_SIZE as u64).unwrap();
let setup_stage = SetupStage::new()
.with_request_type(0b10000000)
.with_request(6)
.with_value(0x2 << 8 | config_index as u16)
.with_length(region.size() as u16)
.with_index(0)
.with_transfer_type(TransferType::InDataStage);
let data_stage = DataStage::new()
.with_data_buffer_pointer(paddr)
.with_trb_transfer_length(region.size() as u32)
.with_direction(true);
let status_stage = StatusStage::new()
.with_interrupter_target(0)
.with_interrupt_on_completion(true)
.with_direction(false);
let fut = self.send_control_command(&[
setup_stage.into(),
data_stage.into(),
status_stage.into(),
]);
(fut, region)
}
pub fn prep_configure_endpoint(
&mut self,
configuration: &Configuration,
configuration_information_enable: bool,
) -> usize {
// TODO: Handle multiple.
let endpoint = configuration.interfaces[0].endpoints[0];
let endpoint_index = endpoint.endpoint_index();
self.input_context.input_control_context.add_context_flags = 1 | 1 << endpoint_index;
self.input_context.input_control_context.drop_context_flags = 0;
self.input_context.input_control_context.settings = if configuration_information_enable {
InputControlContextSettings::new()
.with_configuration_value(configuration.configuration.configuration_value)
} else {
InputControlContextSettings::new()
};
self.input_context
.slot_context
.fields
.set_context_entries(endpoint_index + 1);
let ep_context = &mut self.input_context.endpoint_contexts[endpoint_index as usize - 1];
ep_context.fields = EndpointContextFields::new()
.with_interval(endpoint.interval)
.with_endpoint_type(endpoint.endpoint_type())
.with_max_packet_size(endpoint.max_packet_size);
// TODO: Prep Event Ring.
self.input_context.physical_address()
}
pub fn handle_completion(&mut self, trb: TransferEvent) {
if trb.endpoint_id() != 0x1 {
mammoth::debug!("Unhandled endpoint id {}", trb.endpoint_id());
return;
}
self.endpoint_0_transfer_ring
.handle_completion(trb, trb.transfer_trb_pointer() as usize);
}
}
pub struct DeviceSlotManager {
device_context_base_array: DeviceContextBaseArray,
slots: Box<[Option<DeviceSlot>]>,
doorbells: Box<[Option<DoorbellPointer>]>,
}
impl DeviceSlotManager {
pub fn new(max_slots: u8, doorbells: Box<[DoorbellPointer]>) -> Self {
assert!(
doorbells.len() == max_slots as usize,
"Got an incorrect doorbell slice size."
);
Self {
device_context_base_array: DeviceContextBaseArray::new(max_slots),
slots: core::iter::repeat_with(|| None)
.take(max_slots as usize)
.collect(),
doorbells: doorbells.into_iter().map(Some).collect(),
}
}
pub fn device_context_base_array_physical_address(&self) -> usize {
self.device_context_base_array.0.physical_address()
}
/// Prepares a slot and an input context for an address device command.
///
/// Follows section 4.6.5 of the XHCI spec.
pub fn prep_slot_for_address_device(&mut self, slot_id: u8, port_number: u8) -> usize {
// TODO: Ensure alignment
let mut device_slot = DeviceSlot::new(
self.doorbells[(slot_id - 1) as usize]
.take()
.expect("Slot already allocated."),
);
// The Add Context flags for the Slot Context and the Endpoint 0 Context shall be set to 1.
device_slot
.input_context
.input_control_context
.add_context_flags = 0x3;
// See XHCI 4.5.2 for information
device_slot.input_context.slot_context.fields = device_slot
.input_context
.slot_context
.fields
.with_root_hub_port_number(port_number)
.with_route_string(0)
.with_context_entries(1)
.with_interrupter_target(0);
// The Endpoint 0 Context data structure in the
// Input Context shall define valid values for the TR Dequeue Pointer, EP Type, Error
// Count (CErr), and Max Packet Size fields. The MaxPStreams, Max Burst Size, and
// EP State values shall be cleared to '0'
device_slot
.input_context
.endpoint_context_0
.tr_deque_pointer = TRDequeuePointer::new()
.with_pointer(device_slot.endpoint_0_transfer_ring.physical_base_address() as u64)
.with_dequeue_cycle_state(true);
device_slot.input_context.endpoint_context_0.fields = EndpointContextFields::new()
.with_endpoint_type(EndpointType::Control)
.with_max_primary_streams(0)
.with_max_burst_size(0)
.with_endpoint_state(EndpointState::Disabled);
self.device_context_base_array.0[slot_id as usize] =
device_slot.device_context.physical_address() as u64;
let paddr = device_slot.input_context.physical_address();
self.slots[slot_id as usize - 1] = Some(device_slot);
paddr
}
pub fn slot_at(&mut self, slot_id: u8) -> &mut DeviceSlot {
self.slots[slot_id as usize - 1].as_mut().unwrap()
}
}
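endpoint_index() above presumably yields the xHCI Device Context Index for the endpoint; the standard formula, sketched:

// DCI = endpoint number * 2, plus 1 for IN endpoints. Bit 7 of
// bEndpointAddress is the direction, bits 3:0 the endpoint number.
fn device_context_index(endpoint_address: u8) -> u8 {
    let number = endpoint_address & 0x0F;
    let is_in = endpoint_address & 0x80 != 0;
    number * 2 + u8::from(is_in)
}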


@@ -0,0 +1,369 @@
use alloc::sync::Arc;
use mammoth::sync::Mutex;
use mammoth::task::Spawner;
use mammoth::task::Task;
use super::registers::{self};
use crate::xhci::data_structures::AddressDeviceCommand;
use crate::xhci::data_structures::CommandCompletionCode;
use crate::xhci::data_structures::CommandCompletionEvent;
use crate::xhci::data_structures::CommandTrb;
use crate::xhci::data_structures::EnableSlotCommand;
use crate::xhci::data_structures::EventTrb;
use crate::xhci::data_structures::NoOpCommand;
use crate::xhci::data_structures::PortStatusChangeEvent;
use crate::xhci::device_context_base_array::DeviceSlotManager;
use crate::xhci::interrupter::Interrupter;
use crate::xhci::registers::DoorbellPointer;
use crate::xhci::registers::HostControllerOperationalWrapper;
use crate::xhci::registers::InterrupterRegisterSet;
use crate::xhci::registers::PortStatusAndControl;
use crate::xhci::trb_ring::CommandRing;
use crate::xhci::usb::Configuration;
use crate::xhci::usb::EndpointDescriptor;
pub struct XHCIDriver {
#[allow(dead_code)]
pci_device: pci::PciDevice,
capabilities: registers::HostControllerCapabilities,
operational: HostControllerOperationalWrapper,
command_ring: Mutex<CommandRing>,
// TODO: Add multiple interrupters.
interrupter: Mutex<Interrupter>,
device_slot_manager: Mutex<DeviceSlotManager>,
}
impl XHCIDriver {
pub fn from_pci_device(mut pci_device: pci::PciDevice) -> Self {
let address =
((pci_device.header().bars[1] as usize) << 32) | (pci_device.header().bars[0] as usize);
let irq_port_cap = pci_device.register_msi().unwrap();
let (operational, capabilities) = HostControllerOperationalWrapper::new(address);
let max_slots = capabilities.params_1.max_device_slots();
let doorbell_physical = address + capabilities.doorbell_offset as usize;
let (command_doorbell, slot_doorbells) =
DoorbellPointer::create_command_and_slots(doorbell_physical, max_slots);
// The runtime register space begins with the MFINDEX register, so map
// two register-set-sized chunks and skip past the first one below.
let interrupter_registers = mammoth::mem::map_direct_physical_and_leak(
address + capabilities.runtime_register_space_offset as usize,
size_of::<InterrupterRegisterSet>() * 2,
);
let interrupter_registers = unsafe { interrupter_registers.add(1) };
let mut driver = Self {
pci_device,
capabilities,
operational,
command_ring: Mutex::new(CommandRing::new(command_doorbell)),
interrupter: Mutex::new(Interrupter::new(interrupter_registers, irq_port_cap)),
device_slot_manager: Mutex::new(DeviceSlotManager::new(max_slots, slot_doorbells)),
};
driver.initialize();
driver
}
fn initialize(&mut self) {
#[cfg(feature = "debug")]
mammoth::debug!("Stopping XHCI Controller.");
// Stop the host controller.
self.operational
.update_command(|cmd| cmd.with_run_stop(false));
#[cfg(feature = "debug")]
mammoth::debug!("Waiting for controller to halt.");
// Sleep until the controller is halted.
let mut status = self.operational.read_status();
while !status.host_controller_halted() {
// TODO: Sleep for how long?
mammoth::syscall::thread_sleep(50).unwrap();
status = self.operational.read_status();
}
#[cfg(feature = "debug")]
mammoth::debug!("Resetting Controller.");
self.operational
.update_command(|cmd| cmd.with_host_controller_reset(true));
let mut command: registers::UsbCommand = self.operational.read_command();
while command.host_controller_reset() {
// TODO: Sleep for how long?
mammoth::syscall::thread_sleep(50).unwrap();
command = self.operational.read_command();
}
#[cfg(feature = "debug")]
mammoth::debug!("XHCI Controller Reset, waiting ready.");
let mut status = self.operational.read_status();
while status.controller_not_ready() {
// TODO: Sleep for how long?
mammoth::syscall::thread_sleep(50).unwrap();
status = self.operational.read_status();
}
#[cfg(feature = "debug")]
mammoth::debug!("XHCI Controller Ready.");
#[cfg(feature = "debug")]
mammoth::debug!("Setting Command Ring");
self.operational.set_command_ring_dequeue_pointer(
self.command_ring.lock().trb_ring.physical_base_address(),
true,
);
#[cfg(feature = "debug")]
mammoth::debug!("Setting DCBA.");
self.operational
.set_device_context_base_address_array_pointer(
self.device_slot_manager
.lock()
.device_context_base_array_physical_address(),
);
// We tell the controller that we support as many slots as it does because
// we allocate a full 4K page to the DCBA, which holds 256 entries while
// the maximum slot count is 255.
self.operational.update_configure(|cfg| {
cfg.with_max_device_slots_enabled(self.capabilities.params_1.max_device_slots())
.with_configuration_information_enable(
self.capabilities
.cap_params_2
.configuration_information_capability(),
)
});
assert!(
self.capabilities.params_2.max_scratchpad_buffers() == 0,
"Unsupported scratchpad buffers."
);
#[cfg(feature = "debug")]
mammoth::debug!("Resetting event ring.");
// SAFETY: The HC is stopped.
unsafe { self.interrupter.lock().reset() };
self.operational
.update_command(|cmd| cmd.with_run_stop(true).with_interrupter_enable(true));
#[cfg(feature = "debug")]
mammoth::debug!("Enabled interrupts and controller.");
}
pub fn interrupt_loop(self: Arc<Self>, spawner: Spawner) {
let completion_handler = |trb| {
self.clone().handle_completion(spawner.clone(), trb);
};
self.interrupter.lock().interrupt_loop(completion_handler);
}
fn handle_completion(self: Arc<XHCIDriver>, spawner: Spawner, trb: EventTrb) {
match trb {
EventTrb::Transfer(trb) => {
if trb.completion_code() != CommandCompletionCode::Success {
mammoth::debug!(
"Got a transfer event with a bad completion code: {:?}",
trb.completion_code()
)
}
// TODO: make this not panic if we don't have the slot.
self.device_slot_manager
.lock()
.slot_at(trb.slot_id())
.handle_completion(trb);
}
EventTrb::CommandCompletion(trb) => {
self.command_ring
.lock()
.trb_ring
.handle_completion(trb, trb.command_trb_pointer() as usize);
}
EventTrb::PortStatusChange(trb) => {
let self_clone = self.clone();
spawner.spawn(Task::new(async move {
self_clone.port_status_change(trb).await
}));
}
EventTrb::BandwidthRequest(trb) => {
mammoth::debug!("Unhandled bandwidth request event: {:?}", trb);
}
EventTrb::Doorbell(trb) => {
mammoth::debug!("Unhandled doorbell event: {:?}", trb);
}
EventTrb::HostController(trb) => {
mammoth::debug!("Unhandled host controller event: {:?}", trb);
}
EventTrb::DeviceNotification(trb) => {
mammoth::debug!("Unhandled device notification event: {:?}", trb);
}
EventTrb::MFINDEXWrap(trb) => {
mammoth::debug!("Unhandled MFINDEX wrap event: {:?}", trb);
}
}
}
async fn send_command(&self, trb: CommandTrb) -> CommandCompletionEvent {
// Create the future in an inner scope so the lock is dropped before we await.
let future = { self.command_ring.lock().enqueue_command(trb) };
future.await
}
pub async fn startup(&self) {
#[cfg(feature = "debug")]
mammoth::debug!("Sending no op command.");
let result = self.send_command(NoOpCommand::new().into()).await;
assert_eq!(result.completion_code(), CommandCompletionCode::Success);
#[cfg(feature = "debug")]
mammoth::debug!("Successfully tested no op command.");
#[cfg(feature = "debug")]
mammoth::debug!("Resetting all connected ports.");
for port_index in 0..self.operational.num_ports() {
self.operational
.update_port_status(port_index, |p| p.clear_change_bits());
}
for port_index in 0..self.operational.num_ports() {
let status = self.operational.get_port(port_index).status_and_control;
if status.port_power() && status.current_connect_status() {
mammoth::debug!("Resetting port {}", port_index);
self.operational.update_port_status(port_index, |_| {
PortStatusAndControl::new()
.with_port_reset(true)
.with_port_power(true)
});
}
}
}
async fn port_status_change(self: Arc<Self>, status_change: PortStatusChangeEvent) {
// Ports are indexed from 1.
let port_id = status_change.port_id();
let port_index = (port_id - 1) as usize;
let port_status = self.operational.get_port(port_index).status_and_control;
#[cfg(feature = "debug")]
mammoth::debug!("Port status change for port {}", port_id);
if !port_status.port_reset_change() {
mammoth::debug!(
"Unknown port status event, not handling. status= {:?}",
port_status
);
return;
}
self.operational
.update_port_status(port_index, |s| s.clear_change_bits());
#[cfg(feature = "debug")]
mammoth::debug!("Enabling slot.");
let resp = self.send_command(EnableSlotCommand::new().into()).await;
assert_eq!(resp.completion_code(), CommandCompletionCode::Success);
let slot = resp.slot_id();
#[cfg(feature = "debug")]
mammoth::debug!("Creating slot data structures in slot {}.", slot);
let input_context_address = self
.device_slot_manager
.lock()
.prep_slot_for_address_device(slot, port_id);
#[cfg(feature = "debug")]
mammoth::debug!("Sending address device.");
let resp = self
.send_command(
AddressDeviceCommand::new()
.with_slot_id(slot)
.with_input_context_pointer(input_context_address as u64)
.into(),
)
.await;
assert_eq!(resp.completion_code(), CommandCompletionCode::Success);
#[cfg(feature = "debug")]
mammoth::debug!("Reading device descriptor");
let (fut, descriptor) = self
.device_slot_manager
.lock()
.slot_at(slot)
.read_device_descriptor();
let resp = fut.await;
assert_eq!(resp.completion_code(), CommandCompletionCode::Success);
#[cfg(feature = "debug")]
mammoth::debug!("Descriptor: {:0x?}", *descriptor);
assert!(descriptor.num_configurations >= 1);
#[cfg(feature = "debug")]
mammoth::debug!("Reading configuration descriptor");
let (fut, region) = self
.device_slot_manager
.lock()
.slot_at(slot)
.read_configuration_descriptor(0);
let resp = fut.await;
assert_eq!(resp.completion_code(), CommandCompletionCode::Success);
#[cfg(feature = "debug")]
let config = Configuration::from(region);
mammoth::debug!("Configuration: {:#x?}", config.configuration);
for int in &config.interfaces {
#[cfg(feature = "debug")]
mammoth::debug!(
"\tInterface {:#x}: {:#?}",
int.interface.interface_number,
int.interface
);
for end in &int.endpoints {
#[cfg(feature = "debug")]
mammoth::debug!("\t\tEndpoint {:#x}: {:#x?}", end.endpoint_address, end);
}
}
assert_eq!(
1,
config.interfaces.len(),
"TODO: Handle multiple interfaces."
);
// TODO: Handle non Keyboard devices.
let int0 = &config.interfaces[0];
assert_eq!(int0.interface.class, 0x3);
assert_eq!(int0.interface.subclass, 0x1);
assert_eq!(int0.interface.protocol, 0x1);
assert_eq!(1, int0.endpoints.len(), "TODO: Handle multiple endpoints");
// TODO: Configure Endpoint Command.
let _paddr = self
.device_slot_manager
.lock()
.slot_at(slot)
.prep_configure_endpoint(
&config,
self.capabilities
.cap_params_2
.configuration_information_capability(),
);
}
}
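The three polling loops in initialize() share one shape; a helper along these lines could factor them (a sketch; the 50 ms interval mirrors the code above):

// Poll `done` at a fixed interval until it holds.
fn wait_until(mut done: impl FnMut() -> bool) {
    while !done() {
        mammoth::syscall::thread_sleep(50).unwrap();
    }
}

// e.g.: wait_until(|| operational.read_status().host_controller_halted());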


@@ -0,0 +1,70 @@
use alloc::vec::Vec;
use crate::xhci::{
data_structures::{EventRingSegmentTable, EventTrb, TransferRequestBlock, TrbRingSegment},
trb_ring::TrbPointer,
};
pub struct EventRing {
segment_table: EventRingSegmentTable,
segments: Vec<TrbRingSegment>,
cycle_bit: bool,
trb_pointer: TrbPointer,
}
impl EventRing {
pub fn new() -> Self {
// Software maintains an Event Ring Consumer Cycle State (CCS) bit, initializing it
// to 1...
let cycle_bit = true;
let mut event_ring = Self {
segment_table: EventRingSegmentTable::new(1),
segments: [TrbRingSegment::new(100)].into(),
cycle_bit,
trb_pointer: TrbPointer::default(),
};
event_ring.segment_table[0].update_from_trb_ring(&event_ring.segments[0]);
event_ring
}
pub fn segment_table(&self) -> &EventRingSegmentTable {
&self.segment_table
}
pub fn erdp_physical_address(&self) -> usize {
self.segments[self.trb_pointer.segment_index].physical_address()
+ self.trb_pointer.segment_physical_offset()
}
fn current_trb(&self) -> TransferRequestBlock {
// TODO: These should be volatile reads.
self.segments[self.trb_pointer.segment_index][self.trb_pointer.segment_offset]
}
fn increment_pointer(&mut self) {
self.trb_pointer.segment_offset += 1;
if self.trb_pointer.segment_offset == self.segments[self.trb_pointer.segment_index].len() {
self.trb_pointer.segment_index += 1;
self.trb_pointer.segment_offset = 0;
if self.trb_pointer.segment_index == self.segments.len() {
// Wrap around to front.
self.trb_pointer.segment_index = 0;
self.cycle_bit = !self.cycle_bit;
}
}
}
pub fn get_next(&mut self) -> Option<EventTrb> {
let curr = self.current_trb();
if curr.cycle() != self.cycle_bit {
None
} else {
self.increment_pointer();
Some(curr.into())
}
}
}


@@ -0,0 +1,60 @@
use core::ptr::NonNull;
use mammoth::cap::Capability;
use crate::xhci::{
data_structures::{EventTrb, TransferRequestBlock},
event_ring::EventRing,
registers::{InterrupterModeration, InterrupterRegisterSet, InterrupterRegisters},
};
pub struct Interrupter {
event_ring: EventRing,
register_set: InterrupterRegisters,
irq_port_cap: Capability,
}
impl Interrupter {
pub fn new(
interrupter_register_set: NonNull<InterrupterRegisterSet>,
irq_port_cap: Capability,
) -> Self {
Self {
event_ring: EventRing::new(),
register_set: InterrupterRegisters::new(interrupter_register_set),
irq_port_cap,
}
}
    // SAFETY:
    // - The HC must be halted when resetting interrupter 0.
    pub unsafe fn reset(&mut self) {
        // SAFETY:
        // - The segment table is size 1.
unsafe {
self.register_set.set_event_ring(
self.event_ring.segment_table(),
self.event_ring.erdp_physical_address(),
);
}
self.register_set.set_moderation(
InterrupterModeration::new()
.with_interrupt_moderation_interval(4000)
.with_interrupt_moderation_counter(0),
);
self.register_set.enable_interrupts();
}
pub fn interrupt_loop(&mut self, completion_handler: impl Fn(EventTrb)) {
loop {
let _ = mammoth::syscall::port_recv(&self.irq_port_cap, &mut [], &mut []).unwrap();
while let Some(trb) = self.event_ring.get_next() {
completion_handler(trb);
}
self.register_set
.update_dequeue_pointer_clearing_busy(self.event_ring.erdp_physical_address());
}
}
}
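
A sketch of how a driver might run this loop and route command completions back to the command ring's futures. Everything here is hedged: spawn_thread is a hypothetical stand-in for whatever threading primitive the host provides, and the EventTrb::CommandCompletion variant shape plus its command_trb_pointer field are assumptions about data_structures.rs, which is outside this hunk:

    spawn_thread(move || {
        interrupter.interrupt_loop(|event| {
            if let EventTrb::CommandCompletion(completion) = event {
                // Wakes the TrbFuture that enqueue_trb_expect_interrupt
                // registered for this TRB's physical address.
                command_ring.lock().trb_ring.handle_completion(
                    completion,
                    completion.command_trb_pointer as usize, // assumed field
                );
            }
        });
    });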

View file

@ -1,2 +1,8 @@
-pub mod data_structures;
-pub mod registers;
+mod data_structures;
+mod device_context_base_array;
+pub mod driver;
+mod event_ring;
+mod interrupter;
+mod registers;
+mod trb_ring;
+mod usb;

View file

@ -0,0 +1,199 @@
use core::task::{Poll, Waker};
use alloc::{collections::vec_deque::VecDeque, sync::Arc, vec::Vec};
use mammoth::sync::Mutex;
use crate::xhci::{
data_structures::{
CommandCompletionEvent, CommandTrb, TransferRequestBlock, TrbLink, TrbRingSegment, TypedTrb,
},
registers::DoorbellPointer,
};
struct TrbFutureState<T> {
/// Physical Address for the enqueued TRB.
/// Used for sanity checking.
physical_address: usize,
waker: Option<Waker>,
response: Option<T>,
}
#[derive(Clone)]
pub struct TrbFuture<T> {
state: Arc<Mutex<TrbFutureState<T>>>,
}
impl<T> TrbFuture<T> {
fn new(paddr: usize) -> Self {
Self {
state: Arc::new(Mutex::new(TrbFutureState {
physical_address: paddr,
waker: None,
response: None,
})),
}
}
}
impl<T: Copy> Future for TrbFuture<T> {
type Output = T;
fn poll(
self: core::pin::Pin<&mut Self>,
cx: &mut core::task::Context<'_>,
) -> core::task::Poll<Self::Output> {
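        // Locking the shared state serializes this poll with
        // handle_completion(): either the response is already present and we
        // return Ready, or our waker is stored before the completion fires and
        // wake_by_ref() will re-poll us. No wakeup can be lost.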
let mut state = self.state.lock();
match state.response {
Some(trb) => Poll::Ready(trb),
None => {
state.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
}
#[derive(Default, Copy, Clone, Debug)]
pub struct TrbPointer {
/// Index into the vector of trb segments.
pub segment_index: usize,
/// Index into the specific segment.
    /// This is a TransferRequestBlock index; to get the physical byte
    /// offset, use segment_physical_offset().
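    /// Example: with 16-byte TRBs, a segment_offset of 5 corresponds to a
    /// physical byte offset of 80 within the segment.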
pub segment_offset: usize,
}
impl TrbPointer {
pub fn segment_physical_offset(&self) -> usize {
self.segment_offset * size_of::<TransferRequestBlock>()
}
}
pub struct TrbRing<T> {
segments: Vec<TrbRingSegment>,
enqueue_pointer: TrbPointer,
cycle_bit: bool,
pending_futures: VecDeque<TrbFuture<T>>,
}
impl<T: Clone> TrbRing<T> {
pub fn new() -> Self {
Self {
            // TODO: What size and segment count should these be?
segments: alloc::vec![TrbRingSegment::new(100)],
enqueue_pointer: TrbPointer::default(),
// Start with this as true so we are flipping bits from 0 (default) to 1
// to mark the enqueue pointer.
cycle_bit: true,
pending_futures: VecDeque::new(),
}
}
pub fn physical_base_address(&self) -> usize {
self.segments[0].physical_address()
}
fn physical_address_of_enqueue_pointer(&self) -> usize {
self.segments[self.enqueue_pointer.segment_index].physical_address()
+ self.enqueue_pointer.segment_physical_offset()
}
pub fn enqueue_trb(&mut self, trb: TransferRequestBlock) {
*self.next_trb_ref() = trb.with_cycle(self.cycle_bit);
self.advance_enqueue_pointer();
}
pub fn enqueue_trb_expect_interrupt(&mut self, trb: TransferRequestBlock) -> TrbFuture<T> {
let paddr = self.physical_address_of_enqueue_pointer();
*self.next_trb_ref() = trb.with_cycle(self.cycle_bit);
self.advance_enqueue_pointer();
let future = TrbFuture::new(paddr);
self.pending_futures.push_back(future.clone());
future
}
fn next_trb_ref(&mut self) -> &mut TransferRequestBlock {
&mut self.segments[self.enqueue_pointer.segment_index][self.enqueue_pointer.segment_offset]
}
fn advance_enqueue_pointer(&mut self) {
self.enqueue_pointer.segment_offset += 1;
if self.enqueue_pointer.segment_offset
== self.segments[self.enqueue_pointer.segment_index].len() - 1
{
// We have reached the end of the segment, insert a link trb.
// Increment the segment index with wrapping.
let next_segment_index =
if self.enqueue_pointer.segment_index + 1 == self.segments.len() {
0
} else {
self.enqueue_pointer.segment_index + 1
};
let next_segment_pointer = self.segments[next_segment_index].physical_address();
let toggle_cycle = next_segment_index == 0;
*self.next_trb_ref() = TrbLink::new()
.with_ring_segment_pointer(next_segment_pointer as u64)
.with_cycle(self.cycle_bit)
.with_toggle_cycle(toggle_cycle)
.to_trb();
// Flip toggle cycle bit if necessary.
self.cycle_bit ^= toggle_cycle;
self.enqueue_pointer = TrbPointer {
segment_index: next_segment_index,
segment_offset: 0,
};
}
}
    pub fn handle_completion(&mut self, completion_trb: T, physical_address: usize) {
        {
            let Some(peek_completion) = self.pending_futures.front() else {
                mammoth::debug!("Got a completion with no pending futures.");
                return;
            };
            let peek_completion = peek_completion.state.lock();
            // TODO: Handle recovery scenarios here.
            if peek_completion.physical_address != physical_address {
                mammoth::debug!(
                    "Got an unexpected command completion. Expected: {:#x}, Got: {:#x}",
                    peek_completion.physical_address,
                    physical_address
                );
                return;
            }
        }
let completion = self.pending_futures.pop_front().unwrap();
let mut completion = completion.state.lock();
completion.response = Some(completion_trb);
if let Some(waker) = &completion.waker {
waker.wake_by_ref();
}
}
}
pub struct CommandRing {
pub trb_ring: TrbRing<CommandCompletionEvent>,
doorbell: DoorbellPointer,
}
impl CommandRing {
pub fn new(doorbell: DoorbellPointer) -> Self {
Self {
trb_ring: TrbRing::new(),
doorbell,
}
}
    // We have to explicitly return a future here.
pub fn enqueue_command(&mut self, command: CommandTrb) -> TrbFuture<CommandCompletionEvent> {
let fut = self.trb_ring.enqueue_trb_expect_interrupt(command.into());
// Command Doorbell is always 0.
self.doorbell.ring(0);
fut
}
}
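
Issuing a command then reads naturally in async code. A sketch, assuming a no-op command constructor (the real CommandTrb builders live in data_structures.rs, outside this hunk):

    let fut = command_ring.enqueue_command(CommandTrb::no_op()); // assumed ctor
    // The doorbell has already been rung; the future resolves once the
    // interrupter feeds the matching completion TRB to handle_completion().
    let completion = fut.await;
    assert_eq!(completion.completion_code(), CommandCompletionCode::Success);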

View file

@ -0,0 +1,47 @@
use bitfield_struct::{bitenum, bitfield};
use core::fmt::Debug;
/// > The value of the bcdUSB field is 0xJJMN
/// > for version JJ.M.N (JJ major version number, M minor version number, N sub-minor
/// > version number), e.g., version 2.1.3 is represented with value 0213H and version 3.0 is
/// > represented with a value of 0300H.
#[bitfield(u16, debug = false)]
pub struct BCDVersion {
#[bits(4)]
sub_minor: u8,
#[bits(4)]
minor: u8,
major: u8,
}
impl Debug for BCDVersion {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.write_fmt(format_args!(
"{}.{}.{}",
self.major(),
self.minor(),
self.sub_minor()
))
}
}
#[bitenum]
#[repr(u8)]
#[derive(Default, Copy, Clone, Debug, Eq, PartialEq)]
pub enum DescriptorType {
#[default]
#[fallback]
Unknown = 0,
Device = 1,
Configuration = 2,
String = 3,
Interface = 4,
Endpoint = 5,
InterfacePower = 8,
Otg = 9,
Debug = 10,
    InterfaceAssociation = 11,
Bos = 15,
DeviceCapability = 16,
}
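
As a sanity check on the BCDVersion field layout, the spec's own example value decodes as expected:

    // 0x0213: bits 8-15 = 0x02 (major), bits 4-7 = 0x1 (minor), bits 0-3 = 0x3 (sub-minor).
    let v = BCDVersion::from_bits(0x0213);
    assert_eq!((v.major(), v.minor(), v.sub_minor()), (2, 1, 3)); // Debug-prints as "2.1.3"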

View file

@ -0,0 +1,183 @@
use alloc::vec::Vec;
use mammoth::mem::MemoryRegion;
use crate::xhci::{data_structures::EndpointType, usb::DescriptorType};
#[repr(C, packed)]
#[derive(Copy, Clone)]
pub struct ConfigurationDescriptor {
length: u8,
descriptor_type: u8,
total_length: u16,
num_interfaces: u8,
pub configuration_value: u8,
configuration_str_index: u8,
attributes: u8,
max_power: u8,
}
const _: () = assert!(size_of::<ConfigurationDescriptor>() == 0x09);
impl core::fmt::Debug for ConfigurationDescriptor {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
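        // total_length is copied to a local first: taking a reference to a
        // multi-byte field of a #[repr(packed)] struct is rejected by the
        // compiler because the reference may be unaligned.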
let total_length = self.total_length;
f.debug_struct("ConfigurationDescriptor")
.field("length", &self.length)
.field("descriptor_type", &self.descriptor_type)
.field("total_length", &total_length)
.field("num_interfaces", &self.num_interfaces)
.field("configuration_value", &self.configuration_value)
.field("configuration_str_index", &self.configuration_str_index)
.field("attributes", &self.attributes)
.field("max_power", &self.max_power)
.finish()
}
}
#[repr(C, packed)]
#[derive(Copy, Clone)]
pub struct InterfaceDescriptor {
length: u8,
descriptor_type: u8,
pub interface_number: u8,
pub alternate_setting: u8,
pub num_endpoints: u8,
pub class: u8,
pub subclass: u8,
pub protocol: u8,
interface_str_index: u8,
}
const _: () = assert!(size_of::<InterfaceDescriptor>() == 0x09);
impl core::fmt::Debug for InterfaceDescriptor {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("InterfaceDescriptor")
.field("length", &self.length)
.field("descriptor_type", &self.descriptor_type)
.field("interface_number", &self.interface_number)
.field("alternate_setting", &self.alternate_setting)
.field("num_endpoints", &self.num_endpoints)
.field("class", &self.class)
.field("subclass", &self.subclass)
.field("protocol", &self.protocol)
.field("interface_str_index", &self.interface_str_index)
.finish()
}
}
#[repr(C, packed)]
#[derive(Copy, Clone)]
pub struct EndpointDescriptor {
length: u8,
descriptor_type: u8,
pub endpoint_address: u8,
pub attributes: u8,
pub max_packet_size: u16,
pub interval: u8,
}
impl EndpointDescriptor {
pub fn endpoint_type(&self) -> EndpointType {
if self.direction_is_in() {
match self.attributes & 0x3 {
0b00 => EndpointType::Control,
0b01 => EndpointType::IsochIn,
0b10 => EndpointType::BulkIn,
0b11 => EndpointType::InterruptIn,
_ => unreachable!(),
}
} else {
match self.attributes & 0x3 {
0b00 => EndpointType::Control,
0b01 => EndpointType::IsochOut,
0b10 => EndpointType::BulkOut,
0b11 => EndpointType::InterruptOut,
_ => unreachable!(),
}
}
}
pub fn direction_is_in(&self) -> bool {
const IN_FLAG: u8 = 0x80;
self.endpoint_address & IN_FLAG == IN_FLAG
}
    pub fn endpoint_index(&self) -> u8 {
        // The xHCI Device Context Index (DCI) for an endpoint is
        // (endpoint number * 2) + direction, where direction is 1 for IN.
        // Mask off the direction bit so it doesn't corrupt the index.
        let endpoint_number = self.endpoint_address & 0x0F;
        if self.direction_is_in() {
            (2 * endpoint_number) + 1
        } else {
            2 * endpoint_number
        }
    }
}
impl core::fmt::Debug for EndpointDescriptor {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("EndpointDescriptor")
.field("length", &self.length)
.field("descriptor_type", &self.descriptor_type)
.field("endpoint_address", &self.endpoint_address)
.field("attributes", &self.attributes)
.field("interval", &self.interval)
.finish()
}
}
const _: () = assert!(size_of::<EndpointDescriptor>() == 0x07);
pub struct Interface {
pub interface: InterfaceDescriptor,
pub endpoints: Vec<EndpointDescriptor>,
}
pub struct Configuration {
pub configuration: ConfigurationDescriptor,
pub interfaces: Vec<Interface>,
}
impl Configuration {
pub fn from(region: MemoryRegion) -> Self {
let configuration = unsafe { *region.raw_ptr_at_offset::<ConfigurationDescriptor>(0) };
let mut interfaces = Vec::new();
let mut curr_offset = configuration.length as u64;
for _ in 0..configuration.num_interfaces {
mammoth::debug!("{:#x}", curr_offset);
let interface =
unsafe { *region.raw_ptr_at_offset::<InterfaceDescriptor>(curr_offset) };
curr_offset += interface.length as u64;
let mut endpoints = Vec::new();
for _ in 0..interface.num_endpoints {
mammoth::debug!("{:#x}", curr_offset);
let mut endpoint =
unsafe { *region.raw_ptr_at_offset::<EndpointDescriptor>(curr_offset) };
let mut descriptor_type = DescriptorType::from_bits(endpoint.descriptor_type);
while descriptor_type != DescriptorType::Endpoint {
                    // Class-specific descriptors (e.g. the HID descriptor) can
                    // sit between an interface and its endpoints; skip them
                    // using their length field.
                    mammoth::debug!(
                        "Ignoring descriptor of type {:#x} ({} bytes)",
                        endpoint.descriptor_type,
                        endpoint.length
                    );
curr_offset += endpoint.length as u64;
endpoint =
unsafe { *region.raw_ptr_at_offset::<EndpointDescriptor>(curr_offset) };
descriptor_type = DescriptorType::from_bits(endpoint.descriptor_type);
}
curr_offset += endpoint.length as u64;
endpoints.push(endpoint);
}
interfaces.push(Interface {
interface,
endpoints,
})
}
Self {
configuration,
interfaces,
}
}
}
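
For a typical boot keyboard, the buffer walked above looks roughly like this (descriptor lengths per the USB 2.0 spec; the HID descriptor is exactly the kind the inner while loop skips):

    offset 0x00: Configuration descriptor  (length 9)
    offset 0x09: Interface descriptor      (length 9)
    offset 0x12: HID descriptor            (length 9, skipped)
    offset 0x1b: Endpoint descriptor       (length 7)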

View file

@ -0,0 +1,48 @@
use crate::xhci::usb::{BCDVersion, DescriptorType};
#[repr(C, packed)]
#[derive(Default, Copy, Clone)]
pub struct DeviceDescriptor {
length: u8,
descriptor_type: u8,
usb_version: BCDVersion,
device_class: u8,
device_subclass: u8,
device_protocol: u8,
max_packet_size_ep_0: u8,
vendor_id: u16,
product_id: u16,
device_version: BCDVersion,
manufacturer_str_index: u8,
product_str_index: u8,
serial_str_index: u8,
pub num_configurations: u8,
}
const _: () = assert!(size_of::<DeviceDescriptor>() == 0x12);
impl core::fmt::Debug for DeviceDescriptor {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
let usb_version = self.usb_version;
let descriptor_type = DescriptorType::from_bits(self.descriptor_type);
let vendor_id = self.vendor_id;
let product_id = self.product_id;
let device_version = self.device_version;
f.debug_struct("DeviceDescriptor")
.field("length", &self.length)
.field("descriptor_type", &descriptor_type)
.field("usb_version", &usb_version)
.field("device_class", &self.device_class)
.field("device_subclass", &self.device_subclass)
.field("device_protocol", &self.device_protocol)
.field("max_packet_size_ep_0", &self.max_packet_size_ep_0)
.field("vendor_id", &vendor_id)
.field("product_id", &product_id)
.field("device_version", &device_version)
.field("manufacturer_str_index", &self.manufacturer_str_index)
.field("product_str_index", &self.product_str_index)
.field("serial_str_index", &self.serial_str_index)
.field("num_configurations", &self.num_configurations)
.finish()
}
}

View file

@ -0,0 +1,7 @@
mod common;
mod configuration_descriptor;
mod device_descriptor;
pub use common::*;
pub use configuration_descriptor::*;
pub use device_descriptor::*;

View file

@ -18,7 +18,7 @@ if [[ $1 == "debug" ]]; then
fi
# Use machine q35 to access PCI devices.
-qemu-system-x86_64 -machine q35 -d guest_errors -m 1G -serial stdio -hda ${BUILD_DIR}/disk.img ${QEMU_ARGS} -device nec-usb-xhci,id=xhci -device usb-kbd,bus=xhci.0
+~/.local/bin/qemu-system-x86_64 -machine q35 -d guest_errors -m 1G -serial stdio -hda ${BUILD_DIR}/disk.img ${QEMU_ARGS} -device nec-usb-xhci,id=xhci -device usb-kbd,bus=xhci.0
popd
# Extra options to add to this script in the future.

View file

@ -18,6 +18,8 @@ void DriverManager::WriteMessage(uint64_t irq_num, IpcMessage&& message) {
return;
}
dbgln("IRQ offset {x}", offset);
driver_list_[offset]->Send(glcr::Move(message));
}