Compare commits

..

4 commits

17 changed files with 249 additions and 521 deletions

View file

@ -0,0 +1,51 @@
#![no_std]
#![no_main]
extern crate alloc;
mod xhci;
use alloc::sync::Arc;
use mammoth::{
cap::Capability,
debug, define_entry,
sync::Mutex,
task::{Executor, Task},
zion::z_err_t,
};
use pci::PciDevice;
use xhci::driver::XHCIDriver;
define_entry!();
#[unsafe(no_mangle)]
extern "C" fn main() -> z_err_t {
#[cfg(feature = "debug")]
debug!("Voyageurs Starting.");
let yellowstone = yellowstone_yunq::from_init_endpoint();
let xhci_info = yellowstone
.get_xhci_info()
.expect("Failed to get XHCI info from yellowstone.");
let pci_device = PciDevice::from_cap(Capability::take(xhci_info.xhci_region)).unwrap();
let xhci_driver = Arc::new(XHCIDriver::from_pci_device(pci_device));
let executor = Arc::new(Mutex::new(Executor::new()));
let driver_clone = xhci_driver.clone();
let interrupt_thread = mammoth::thread::spawn(move || driver_clone.interrupt_loop());
let driver_clone = xhci_driver.clone();
executor
.clone()
.lock()
.spawn(Task::new((|| xhci_driver.clone().startup())()));
executor.clone().lock().run();
interrupt_thread.join().unwrap();
0
}

View file

@ -250,8 +250,8 @@ pub fn map_cap_and_leak(mem_cap: Capability) -> u64 {
vaddr vaddr
} }
pub fn map_direct_physical_and_leak<T>(paddr: usize, size: usize) -> *mut T { pub fn map_direct_physical_and_leak<T>(paddr: u64, size: u64) -> *mut T {
let mem_cap = syscall::memory_object_direct_physical(paddr as u64, size as u64).unwrap(); let mem_cap = syscall::memory_object_direct_physical(paddr, size).unwrap();
let vaddr = syscall::address_space_map(&mem_cap).unwrap(); let vaddr = syscall::address_space_map(&mem_cap).unwrap();
mem_cap.release(); mem_cap.release();
vaddr as *mut T vaddr as *mut T

View file

@ -16,22 +16,6 @@ pub struct PhysicalBox<T: ?Sized> {
_marker: PhantomData<T>, _marker: PhantomData<T>,
} }
impl<T> PhysicalBox<T> {
pub fn new(data: T) -> Self {
let (memory_region, paddr) =
MemoryRegion::contiguous_physical(size_of::<T>() as u64).expect("Failed to allocate");
// UNWRAP: We know this isn't null.
let ptr = NonNull::new(memory_region.mut_ptr_at_offset(0)).unwrap();
unsafe { ptr.write(data) };
Self {
data: ptr,
region: memory_region,
physical_address: paddr as usize,
_marker: PhantomData,
}
}
}
impl<T: ?Sized> PhysicalBox<T> { impl<T: ?Sized> PhysicalBox<T> {
pub fn physical_address(&self) -> usize { pub fn physical_address(&self) -> usize {
self.physical_address self.physical_address
@ -67,10 +51,16 @@ impl<T> PhysicalBox<[T]> {
{ {
let layout = core::alloc::Layout::array::<T>(len).expect("Layout overflow"); let layout = core::alloc::Layout::array::<T>(len).expect("Layout overflow");
// TODO: Implement a function like alloc that takes a layout. // TODO: Implement a function like alloc that takes a layout.
let (memory_region, paddr) = let (memory_region, paddr) =
MemoryRegion::contiguous_physical(layout.size() as u64).expect("Failed to allocate"); MemoryRegion::contiguous_physical(layout.size() as u64).expect("Failed to allocate");
crate::debug!(
"Physical box allocated: v {:0x} p {:0x}",
memory_region.vaddr(),
paddr
);
let ptr: *mut T = memory_region.mut_ptr_at_offset(0); let ptr: *mut T = memory_region.mut_ptr_at_offset(0);
for i in 0..len { for i in 0..len {
unsafe { unsafe {

View file

@ -29,7 +29,7 @@ impl AhciController {
let pci_device = PciDevice::from_cap(pci_memory).unwrap(); let pci_device = PciDevice::from_cap(pci_memory).unwrap();
let hba_vaddr = let hba_vaddr =
mem::map_direct_physical_and_leak(pci_device.header().bars[5] as usize, 0x1100) mem::map_direct_physical_and_leak(pci_device.header().bars[5] as u64, 0x1100)
as *mut c_void as u64; as *mut c_void as u64;
let hba = unsafe { (hba_vaddr as *mut AhciHba).as_mut().unwrap() }; let hba = unsafe { (hba_vaddr as *mut AhciHba).as_mut().unwrap() };
let mut controller = Self { let mut controller = Self {

View file

@ -1,11 +0,0 @@
use crate::xhci::data_structures::{EndpointContext, SlotContext};
#[repr(C, align(64))]
#[derive(Default)]
pub struct DeviceContext {
slot_context: SlotContext,
endpoint_context_0: EndpointContext,
endpoint_contexts: [EndpointContext; 30],
}
const _: () = assert!(size_of::<DeviceContext>() == 0x400);

View file

@ -1,8 +1,7 @@
use bitfield_struct::{bitenum, bitfield}; use bitfield_struct::bitfield;
#[repr(u8)]
#[bitenum]
#[derive(Debug)] #[derive(Debug)]
#[repr(u8)]
pub enum EndpointState { pub enum EndpointState {
/// The endpoint is not operational. /// The endpoint is not operational.
Disabled = 0, Disabled = 0,
@ -18,15 +17,29 @@ pub enum EndpointState {
/// The endpoint is not running due to a TRB Error. SW may manipulate the Transfer /// The endpoint is not running due to a TRB Error. SW may manipulate the Transfer
/// Ring while in this state. /// Ring while in this state.
Error = 4, Error = 4,
#[fallback]
Unknown = 5, Unknown = 5,
} }
#[repr(u8)] impl EndpointState {
#[bitenum] const fn from_bits(value: u8) -> Self {
match value {
0 => Self::Disabled,
1 => Self::Running,
2 => Self::Halted,
3 => Self::Stopped,
4 => Self::Error,
_ => Self::Unknown,
}
}
const fn into_bits(self) -> u8 {
self as u8
}
}
#[derive(Debug)] #[derive(Debug)]
#[repr(u8)]
pub enum EndpointType { pub enum EndpointType {
#[fallback]
NotValid = 0, NotValid = 0,
IsochOut = 1, IsochOut = 1,
BulkOut = 2, BulkOut = 2,
@ -37,6 +50,26 @@ pub enum EndpointType {
InterruptIn = 7, InterruptIn = 7,
} }
impl EndpointType {
const fn from_bits(value: u8) -> Self {
match value {
0 => Self::NotValid,
1 => Self::IsochOut,
2 => Self::BulkOut,
3 => Self::InterruptOut,
4 => Self::Control,
5 => Self::IsochIn,
6 => Self::BulkIn,
7 => Self::InterruptIn,
_ => Self::NotValid,
}
}
const fn into_bits(self) -> u8 {
self as u8
}
}
#[bitfield(u64)] #[bitfield(u64)]
pub struct EndpointContextFields { pub struct EndpointContextFields {
/// Endpoint State (EP State). The Endpoint State identifies the current operational state of the /// Endpoint State (EP State). The Endpoint State identifies the current operational state of the
@ -170,37 +203,26 @@ impl TRDequeuePointer {
self.tr_deque_pointer() << 4 self.tr_deque_pointer() << 4
} }
pub fn set_pointer(&mut self, tr_deque_pointer: u64) { pub fn set_pointer(self, tr_deque_pointer: u64) -> TRDequeuePointer {
self.set_tr_deque_pointer(tr_deque_pointer >> 4)
}
pub fn with_pointer(self, tr_deque_pointer: u64) -> Self {
self.with_tr_deque_pointer(tr_deque_pointer >> 4) self.with_tr_deque_pointer(tr_deque_pointer >> 4)
} }
} }
#[bitfield(u64)] #[repr(C, packed)]
struct AdditionalFields { pub struct EndpointContext {
pub fields: u64,
pub tr_deque_pointer: TRDequeuePointer,
/// Average TRB Length. This field represents the average Length of the TRBs executed by this /// Average TRB Length. This field represents the average Length of the TRBs executed by this
/// endpoint. The value of this field shall be greater than 0. Refer to section 4.14.1.1 and the /// endpoint. The value of this field shall be greater than 0. Refer to section 4.14.1.1 and the
/// implementation note TRB Lengths and System Bus Bandwidth for more information. /// implementation note TRB Lengths and System Bus Bandwidth for more information.
/// The xHC shall use this parameter to calculate system bus bandwidth requirements /// The xHC shall use this parameter to calculate system bus bandwidth requirements
pub average_trb_length: u16, pub average_trb_lenght: u16,
/// Max Endpoint Service Time Interval Payload Low (Max ESIT Payload Lo). This field indicates /// Max Endpoint Service Time Interval Payload Low (Max ESIT Payload Lo). This field indicates
/// the low order 16 bits of the Max ESIT Payload. The Max ESIT Payload represents the total /// the low order 16 bits of the Max ESIT Payload. The Max ESIT Payload represents the total
/// number of bytes this endpoint will transfer during an ESIT. This field is only valid for periodic /// number of bytes this endpoint will transfer during an ESIT. This field is only valid for periodic
/// endpoints. Refer to section 6.2.3.8 for more information. /// endpoints. Refer to section 6.2.3.8 for more information.
pub max_esit_payload_lo: u16, pub max_esit_payload_lo: u16,
__: u32, __: [u32; 3],
}
#[repr(C)]
#[derive(Default)]
pub struct EndpointContext {
pub fields: EndpointContextFields,
pub tr_deque_pointer: TRDequeuePointer,
additional_fields: AdditionalFields,
__: u64,
} }
const _: () = assert!(size_of::<EndpointContext>() == 0x20); const _: () = assert!(size_of::<EndpointContext>() == 0x20);

View file

@ -1,33 +0,0 @@
use bitfield_struct::bitfield;
use crate::xhci::data_structures::{EndpointContext, SlotContext};
#[bitfield(u32)]
pub struct InputControlContextSettings {
configuration_value: u8,
interface_number: u8,
alternate_setting: u8,
__: u8,
}
#[repr(C)]
#[derive(Default)]
pub struct InputControlContext {
pub drop_context_flags: u32,
pub add_context_flags: u32,
__: [u32; 5],
settings: InputControlContextSettings,
}
const _: () = assert!(size_of::<InputControlContext>() == 0x20);
#[repr(C)]
#[derive(Default)]
pub struct InputContext {
pub input_control_context: InputControlContext,
pub slot_context: SlotContext,
pub endpoint_context_0: EndpointContext,
pub endpoint_contexts: [EndpointContext; 30],
}
const _: () = assert!(size_of::<InputContext>() == 0x420);

View file

@ -1,15 +1,10 @@
mod device_context;
mod endpoint_context; mod endpoint_context;
mod event_ring_segment_table; mod event_ring_segment_table;
mod input_context;
mod slot_context; mod slot_context;
mod trb; mod trb;
mod trb_ring_segment; mod trb_ring_segment;
pub use device_context::*;
pub use endpoint_context::*;
pub use event_ring_segment_table::*; pub use event_ring_segment_table::*;
pub use input_context::*;
pub use slot_context::*; pub use slot_context::*;
pub use trb::*; pub use trb::*;
pub use trb_ring_segment::*; pub use trb_ring_segment::*;

View file

@ -125,7 +125,6 @@ pub struct SlotContextFields {
} }
#[repr(C)] #[repr(C)]
#[derive(Default)]
pub struct SlotContext { pub struct SlotContext {
pub fields: SlotContextFields, pub fields: SlotContextFields,
__: u128, __: u128,

View file

@ -142,35 +142,6 @@ pub struct TrbLink {
impl TypedTrb for TrbLink {} impl TypedTrb for TrbLink {}
#[bitfield(u128)]
pub struct TrbTransferEvent {
pub transfer_trb_pointer: u64,
#[bits(24)]
pub trb_transfer_lenght: u32,
/// Completion Code. This field encodes the completion status of the command that generated the
/// event. Refer to the respective command definition for a list of the possible Completion Codes
/// associated with the command. Refer to section 6.4.5 for an enumerated list of possible error
/// conditions.
pub completion_code: u8,
#[bits(10)]
__: u16,
/// TRB Type. This field identifies the type of the TRB. Refer to Table 6-91 for the definition of the
/// Command Completion Event TRB type ID
#[bits(6, default=TrbType::TransferEvent)]
pub trb_type: TrbType,
#[bits(5)]
pub endpoint_id: u8,
#[bits(3)]
__: u8,
pub slot_id: u8,
}
impl TypedTrb for TrbTransferEvent {}
#[bitenum] #[bitenum]
#[repr(u8)] #[repr(u8)]
pub enum CommandCompletionCode { pub enum CommandCompletionCode {
@ -261,18 +232,3 @@ pub struct TrbEnableSlotCommand {
} }
impl TypedTrb for TrbEnableSlotCommand {} impl TypedTrb for TrbEnableSlotCommand {}
#[bitfield(u128)]
pub struct TrbAddressDeviceCommand {
pub input_context_pointer: u64,
__: u32,
#[bits(9)]
__: u16,
pub block_set_address_request: bool,
#[bits(6, default=TrbType::AddressDeviceCommand)]
trb_typ: TrbType,
__: u8,
pub slot_id: u8,
}
impl TypedTrb for TrbAddressDeviceCommand {}

View file

@ -1,104 +1,22 @@
use alloc::boxed::Box; use mammoth::mem::MemoryRegion;
use mammoth::physical_box::PhysicalBox;
use crate::xhci::{ pub struct DeviceContextBaseArray {
data_structures::{ #[allow(dead_code)]
DeviceContext, EndpointContextFields, EndpointState, EndpointType, InputContext, region: MemoryRegion,
TRDequeuePointer, TrbTransferEvent, physical_addr: usize,
}, }
registers::DoorbellPointer,
trb_ring::TrbRing,
};
struct DeviceContextBaseArray(PhysicalBox<[u64]>);
impl DeviceContextBaseArray { impl DeviceContextBaseArray {
pub fn new(max_slots: u8) -> Self { pub fn new() -> Self {
Self(PhysicalBox::default_with_count(0, max_slots as usize + 1)) let (region, physical_addr) = MemoryRegion::contiguous_physical(0x1000).unwrap();
} region.zero_region();
}
pub struct DeviceSlot {
device_context: PhysicalBox<DeviceContext>,
endpoint_0_transfer_ring: TrbRing<TrbTransferEvent>,
}
impl DeviceSlot {
fn new() -> Self {
Self { Self {
device_context: PhysicalBox::new(DeviceContext::default()), region,
endpoint_0_transfer_ring: TrbRing::new(), physical_addr: physical_addr as usize,
}
}
}
pub struct DeviceSlotManager {
device_context_base_array: DeviceContextBaseArray,
slots: Box<[Option<DeviceSlot>]>,
doorbells: Box<[DoorbellPointer]>,
}
impl DeviceSlotManager {
pub fn new(max_slots: u8, doorbells: Box<[DoorbellPointer]>) -> Self {
assert!(
doorbells.len() == max_slots as usize,
"Got an incorrect doorbell slice size."
);
Self {
device_context_base_array: DeviceContextBaseArray::new(max_slots),
slots: core::iter::repeat_with(|| None)
.take(max_slots as usize)
.collect(),
doorbells,
} }
} }
pub fn device_context_base_array_physical_address(&self) -> usize { pub fn physical_addr(&self) -> usize {
self.device_context_base_array.0.physical_address() self.physical_addr
}
/// Prepares a slot and an input context for an address device command.
///
/// Follows section 4.6.5 of the XHCI spec.
pub fn prep_slot_for_address_device(
&mut self,
slot_id: u8,
port_number: u8,
) -> PhysicalBox<InputContext> {
// TODO: Ensure alignment
let device_slot = DeviceSlot::new();
let mut input_context = PhysicalBox::new(InputContext::default());
// The Add Context flags for the Slot Context and the Endpoint 0 Context shall be set to 1.
input_context.input_control_context.add_context_flags = 0x3;
// See XHCI 4.5.2 for information
input_context.slot_context.fields = input_context
.slot_context
.fields
.with_root_hub_port_number(port_number)
.with_route_string(0)
.with_context_entries(1)
.with_interrupter_target(0);
// The Endpoint 0 Context data structure in the
// Input Context shall define valid values for the TR Dequeue Pointer, EP Type, Error
// Count (CErr), and Max Packet Size fields. The MaxPStreams, Max Burst Size, and
// EP State values shall be cleared to '0'
input_context.endpoint_context_0.tr_deque_pointer = TRDequeuePointer::new()
.with_pointer(device_slot.endpoint_0_transfer_ring.physical_base_address() as u64)
.with_dequeue_cycle_state(true);
input_context.endpoint_context_0.fields = EndpointContextFields::new()
.with_endpoint_type(EndpointType::Control)
.with_max_primary_streams(0)
.with_max_burst_size(0)
.with_endpoint_state(EndpointState::Disabled);
self.device_context_base_array.0[slot_id as usize] =
device_slot.device_context.physical_address() as u64;
self.slots[slot_id as usize - 1] = Some(device_slot);
input_context
} }
} }

View file

@ -10,19 +10,17 @@ use mammoth::write_unaligned_volatile;
use super::registers::{self}; use super::registers::{self};
use crate::xhci::data_structures::CommandCompletionCode; use crate::xhci::data_structures::CommandCompletionCode;
use crate::xhci::data_structures::TrbAddressDeviceCommand;
use crate::xhci::data_structures::TrbCommandCompletion; use crate::xhci::data_structures::TrbCommandCompletion;
use crate::xhci::data_structures::TrbEnableSlotCommand; use crate::xhci::data_structures::TrbEnableSlotCommand;
use crate::xhci::data_structures::TrbNoOp; use crate::xhci::data_structures::TrbNoOp;
use crate::xhci::data_structures::TrbPortStatusChangeEvent; use crate::xhci::data_structures::TrbPortStatusChangeEvent;
use crate::xhci::data_structures::TrbType; use crate::xhci::data_structures::TrbType;
use crate::xhci::data_structures::TypedTrb; use crate::xhci::data_structures::TypedTrb;
use crate::xhci::device_context_base_array::DeviceSlotManager; use crate::xhci::device_context_base_array::DeviceContextBaseArray;
use crate::xhci::event_ring::EventRing; use crate::xhci::event_ring::EventRing;
use crate::xhci::registers::DoorbellPointer;
use crate::xhci::registers::HostControllerOperationalWrapper; use crate::xhci::registers::HostControllerOperationalWrapper;
use crate::xhci::registers::PortStatusAndControl; use crate::xhci::registers::PortStatusAndControl;
use crate::xhci::trb_ring::CommandRing; use crate::xhci::trb_ring::TrbRing;
pub struct XHCIDriver { pub struct XHCIDriver {
#[allow(dead_code)] #[allow(dead_code)]
@ -30,9 +28,9 @@ pub struct XHCIDriver {
capabilities: registers::HostControllerCapabilities, capabilities: registers::HostControllerCapabilities,
operational: HostControllerOperationalWrapper, operational: HostControllerOperationalWrapper,
registers_region: MemoryRegion, registers_region: MemoryRegion,
command_ring: Mutex<CommandRing>, command_ring: Mutex<TrbRing<TrbCommandCompletion>>,
event_ring: Mutex<EventRing>, event_ring: Mutex<EventRing>,
device_slot_manager: Mutex<DeviceSlotManager>, device_context_base_array: DeviceContextBaseArray,
irq_port_cap: Capability, irq_port_cap: Capability,
} }
@ -47,25 +45,23 @@ impl XHCIDriver {
// page pointed to by the BAR0. // page pointed to by the BAR0.
let three_pages = 0x3000; let three_pages = 0x3000;
let address = let address =
((pci_device.header().bars[1] as usize) << 32) | (pci_device.header().bars[0] as usize); ((pci_device.header().bars[1] as u64) << 32) | (pci_device.header().bars[0] as u64);
let registers_region = MemoryRegion::direct_physical(address as u64, three_pages).unwrap(); let registers_region = MemoryRegion::direct_physical(address, three_pages).unwrap();
let irq_port_cap = pci_device.register_msi().unwrap(); let irq_port_cap = pci_device.register_msi().unwrap();
let (operational, capabilities) = HostControllerOperationalWrapper::new(address as usize); let (operational, capabilities) = HostControllerOperationalWrapper::new(address as usize);
let max_slots = capabilities.params_1.max_device_slots(); let p1 = capabilities.params_1;
let doorbell_physical = address as usize + capabilities.doorbell_offset as usize; mammoth::debug!("Num Port: {:?}", p1);
let (command_doorbell, slot_doorbells) =
DoorbellPointer::create_command_and_slots(doorbell_physical, max_slots);
let mut driver = Self { let mut driver = Self {
pci_device, pci_device,
capabilities, capabilities,
operational, operational,
registers_region, registers_region,
command_ring: Mutex::new(CommandRing::new(command_doorbell)), command_ring: Mutex::new(TrbRing::new()),
event_ring: Mutex::new(EventRing::new()), event_ring: Mutex::new(EventRing::new()),
device_slot_manager: Mutex::new(DeviceSlotManager::new(max_slots, slot_doorbells)), device_context_base_array: DeviceContextBaseArray::new(),
irq_port_cap, irq_port_cap,
}; };
driver.initialize(); driver.initialize();
@ -75,15 +71,32 @@ impl XHCIDriver {
fn interrupters(&self) -> &mut [registers::InterrupterRegisterSet] { fn interrupters(&self) -> &mut [registers::InterrupterRegisterSet] {
// See Table 5-35: Host Controller Runtime Registers // See Table 5-35: Host Controller Runtime Registers
const INTERRUPTER_OFFSET_FROM_RUNTIME: u32 = 0x20; const INTERRUPTER_OFFSET_FROM_RUNTIME: u32 = 0x20;
let runtime = self.capabilities.runtime_register_space_offset;
let interrupter_offset = (self.capabilities.runtime_register_space_offset let interrupter_offset = (runtime + INTERRUPTER_OFFSET_FROM_RUNTIME) as usize;
+ INTERRUPTER_OFFSET_FROM_RUNTIME) as usize;
let params1 = self.capabilities.params_1;
// SAFETY: The XHCI spec says so? // SAFETY: The XHCI spec says so?
unsafe { unsafe {
slice::from_raw_parts_mut( slice::from_raw_parts_mut(
self.registers_region.mut_ptr_at_offset(interrupter_offset), self.registers_region.mut_ptr_at_offset(interrupter_offset),
self.capabilities.params_1.max_interrupters() as usize, params1.max_interrupters() as usize,
)
}
}
fn doorbells(&self) -> &mut [registers::Doorbell] {
let doorbell_offset = self.capabilities.doorbell_offset;
let params1 = self.capabilities.params_1;
// SAFETY: The XHCI spec says so?
unsafe {
slice::from_raw_parts_mut(
self.registers_region
.mut_ptr_at_offset(doorbell_offset as usize),
params1.max_device_slots() as usize,
) )
} }
} }
@ -93,41 +106,45 @@ impl XHCIDriver {
mammoth::debug!("Stopping XHCI Controller."); mammoth::debug!("Stopping XHCI Controller.");
// Stop the host controller. // Stop the host controller.
self.operational self.operational.update(|mut o| {
.update_command(|cmd| cmd.with_run_stop(false)); o.usb_command = o.usb_command.with_run_stop(false);
o
});
#[cfg(feature = "debug")] #[cfg(feature = "debug")]
mammoth::debug!("Waiting for controller to halt."); mammoth::debug!("Waiting for controller to halt.");
// Sleep until the controller is halted. // Sleep until the controller is halted.
let mut status = self.operational.read_status(); let mut status = self.operational.status();
while !status.host_controller_halted() { while !status.host_controller_halted() {
// TODO: Sleep for how long? // TODO: Sleep for how long?
mammoth::syscall::thread_sleep(50).unwrap(); mammoth::syscall::thread_sleep(50).unwrap();
status = self.operational.read_status(); status = self.operational.status();
} }
#[cfg(feature = "debug")] #[cfg(feature = "debug")]
mammoth::debug!("Resetting Controller."); mammoth::debug!("Resetting Controller.");
self.operational self.operational.update(|mut o| {
.update_command(|cmd| cmd.with_host_controller_reset(true)); o.usb_command = o.usb_command.with_host_controller_reset(false);
o
});
let mut command: registers::UsbCommand = self.operational.read_command(); let mut command: registers::UsbCommand = self.operational.command();
while command.host_controller_reset() { while command.host_controller_reset() {
// TODO: Sleep for how long? // TODO: Sleep for how long?
mammoth::syscall::thread_sleep(50).unwrap(); mammoth::syscall::thread_sleep(50).unwrap();
command = self.operational.read_command(); command = self.operational.command();
} }
#[cfg(feature = "debug")] #[cfg(feature = "debug")]
mammoth::debug!("XHCI Controller Reset, waiting ready."); mammoth::debug!("XHCI Controller Reset, waiting ready.");
let mut status = self.operational.read_status(); let mut status = self.operational.status();
while status.controller_not_ready() { while status.controller_not_ready() {
// TODO: Sleep for how long? // TODO: Sleep for how long?
mammoth::syscall::thread_sleep(50).unwrap(); mammoth::syscall::thread_sleep(50).unwrap();
status = self.operational.read_status(); status = self.operational.status();
} }
#[cfg(feature = "debug")] #[cfg(feature = "debug")]
@ -136,29 +153,34 @@ impl XHCIDriver {
#[cfg(feature = "debug")] #[cfg(feature = "debug")]
mammoth::debug!("Setting Command Ring"); mammoth::debug!("Setting Command Ring");
self.operational.set_command_ring_dequeue_pointer( self.operational.update(|mut o| {
self.command_ring.lock().trb_ring.physical_base_address(), // TODO: Split this struct to make it clearer that we are setting the cycle bit here.
true, o.command_ring_control = (self.command_ring.lock().physical_base_address() | 1) as u64;
); o
});
#[cfg(feature = "debug")] #[cfg(feature = "debug")]
mammoth::debug!("Setting DCBA."); mammoth::debug!("Setting DCBA.");
self.operational let params1 = self.capabilities.params_1;
.set_device_context_base_address_array_pointer( self.operational.update(|mut o| {
self.device_slot_manager o.device_context_base_address_array_pointer =
.lock() self.device_context_base_array.physical_addr() as u64;
.device_context_base_array_physical_address(), // We tell the controller that we can support as many slots as it does because
); // we allocate a full 4K page to the DCBA, which is 256 entries and the max
// We tell the controller that we can support as many slots as it does because // slots are 255.
// we allocate a full 4K page to the DCBA, which is 256 entries and the max o.configure = o
// slots are 255. .configure
self.operational.update_configure(|cfg| { .with_max_device_slots_enabled(params1.max_device_slots());
cfg.with_max_device_slots_enabled(self.capabilities.params_1.max_device_slots()) o
}); });
let params2 = self.capabilities.params_2;
let max_scratchpad_buffers =
(params2.max_scratchpad_buffers_hi() << 5) | params2.max_scratchpad_buffers_lo();
assert!( assert!(
self.capabilities.params_2.max_scratchpad_buffers() == 0, max_scratchpad_buffers == 0,
"Unsupported scratchpad buffers." "Unsupported scratchpad buffers."
); );
@ -189,8 +211,13 @@ impl XHCIDriver {
registers::InterrupterManagement::new().with_interrupt_enabled(true) registers::InterrupterManagement::new().with_interrupt_enabled(true)
); );
self.operational self.operational.update(|mut o| {
.update_command(|cmd| cmd.with_run_stop(true).with_interrupter_enable(true)); o.usb_command = o
.usb_command
.with_run_stop(true)
.with_interrupter_enable(true);
o
});
#[cfg(feature = "debug")] #[cfg(feature = "debug")]
mammoth::debug!("Enabled interrupts and controller."); mammoth::debug!("Enabled interrupts and controller.");
@ -209,6 +236,7 @@ impl XHCIDriver {
let mut event_ring = self.event_ring.lock(); let mut event_ring = self.event_ring.lock();
while let Some(trb) = event_ring.get_next() { while let Some(trb) = event_ring.get_next() {
event_ring.update_dequeue_pointer(&mut self.interrupters()[0]);
match trb.trb_type() { match trb.trb_type() {
TrbType::TransferEvent => { TrbType::TransferEvent => {
todo!("Handle Transfer") todo!("Handle Transfer")
@ -216,8 +244,7 @@ impl XHCIDriver {
TrbType::CommandCompletionEvent => { TrbType::CommandCompletionEvent => {
self.command_ring self.command_ring
.lock() .lock()
.trb_ring .handle_commpletion(TrbCommandCompletion::from_trb(trb));
.handle_completion(TrbCommandCompletion::from_trb(trb));
} }
TrbType::PortStatusChangeEvent => { TrbType::PortStatusChangeEvent => {
let trb = TrbPortStatusChangeEvent::from_trb(trb); let trb = TrbPortStatusChangeEvent::from_trb(trb);
@ -231,14 +258,13 @@ impl XHCIDriver {
} }
} }
} }
event_ring.update_dequeue_pointer(&mut self.interrupters()[0]);
} }
} }
async fn send_command(&self, trb: impl TypedTrb) -> TrbCommandCompletion { async fn send_command(&self, trb: impl TypedTrb) -> TrbCommandCompletion {
// Split the future and the await so the lock is dropped before we await. let fut = self.command_ring.lock().enqueue_trb(trb.to_trb());
let future = { self.command_ring.lock().enqueue_command(trb) }; self.doorbells()[0].ring_command();
future.await fut.await
} }
pub async fn startup(&self) { pub async fn startup(&self) {
@ -283,16 +309,16 @@ impl XHCIDriver {
.status_and_control; .status_and_control;
#[cfg(feature = "debug")] #[cfg(feature = "debug")]
mammoth::debug!("Port status change for port {}", port_id); mammoth::debug!(
"Port status change for port {}, status= {:?}",
port_id,
port_status
);
if !port_status.port_reset_change() { if !port_status.port_reset_change() {
mammoth::debug!( mammoth::debug!("Unknown port status event, not handling.");
"Unknown port status event, not handling. status= {:?}",
port_status
);
return; return;
} }
self.operational self.operational
.update_port_status(port_index, |s| s.clear_change_bits()); .update_port_status(port_index, |s| s.clear_change_bits());
@ -301,27 +327,5 @@ impl XHCIDriver {
let resp = self.send_command(TrbEnableSlotCommand::new()).await; let resp = self.send_command(TrbEnableSlotCommand::new()).await;
assert!(resp.completion_code() == CommandCompletionCode::Success.into_bits()); assert!(resp.completion_code() == CommandCompletionCode::Success.into_bits());
let slot = resp.slot_id();
#[cfg(feature = "debug")]
mammoth::debug!("Creating slot data structures in slot {}.", slot);
let input_context = self
.device_slot_manager
.lock()
.prep_slot_for_address_device(slot, port_id);
#[cfg(feature = "debug")]
mammoth::debug!("Sending address device.");
let resp = self
.send_command(
TrbAddressDeviceCommand::new()
.with_slot_id(slot)
.with_input_context_pointer(input_context.physical_address() as u64),
)
.await;
assert!(resp.completion_code() == CommandCompletionCode::Success.into_bits());
} }
} }

View file

@ -1,7 +1,7 @@
use alloc::vec::Vec; use alloc::vec::Vec;
use crate::xhci::{ use crate::xhci::{
data_structures::{EventRingSegmentTable, TransferRequestBlock, TrbRingSegment}, data_structures::{EventRingSegmentTable, TransferRequestBlock, TrbRingSegment, TrbType},
registers::InterrupterRegisterSet, registers::InterrupterRegisterSet,
trb_ring::TrbPointer, trb_ring::TrbPointer,
}; };

View file

@ -83,7 +83,7 @@ pub struct HCSParams2 {
/// field indicates the high order 5 bits of the number of Scratchpad Buffers system software shall /// field indicates the high order 5 bits of the number of Scratchpad Buffers system software shall
/// reserve for the xHC. Refer to section 4.20 for more information. /// reserve for the xHC. Refer to section 4.20 for more information.
#[bits(5, access=RO)] #[bits(5, access=RO)]
max_scratchpad_buffers_hi: u16, pub max_scratchpad_buffers_hi: u16,
/// Scratchpad Restore (SPR). Default = implementation dependent. If Max Scratchpad Buffers is > /// Scratchpad Restore (SPR). Default = implementation dependent. If Max Scratchpad Buffers is >
/// 0 then this flag indicates whether the xHC uses the Scratchpad Buffers for saving state when /// 0 then this flag indicates whether the xHC uses the Scratchpad Buffers for saving state when
@ -103,13 +103,7 @@ pub struct HCSParams2 {
/// bits of the number of Scratchpad Buffers system software shall reserve for the xHC. Refer to /// bits of the number of Scratchpad Buffers system software shall reserve for the xHC. Refer to
/// section 4.20 for more information /// section 4.20 for more information
#[bits(5, access=RO)] #[bits(5, access=RO)]
max_scratchpad_buffers_lo: u16, pub max_scratchpad_buffers_lo: u16,
}
impl HCSParams2 {
pub fn max_scratchpad_buffers(&self) -> u16 {
(self.max_scratchpad_buffers_hi()) << 5 | self.max_scratchpad_buffers_lo()
}
} }
#[bitfield(u32)] #[bitfield(u32)]
@ -326,7 +320,7 @@ pub struct HCCParams2 {
/// Hence the grouping of parameters here. /// Hence the grouping of parameters here.
/// ///
/// These registers are located at the addresses specified in BAR0 and BAR1 in the PCI Header. /// These registers are located at the addresses specified in BAR0 and BAR1 in the PCI Header.
#[repr(C)] #[repr(C, packed)]
#[derive(Copy, Clone)] #[derive(Copy, Clone)]
pub struct HostControllerCapabilities { pub struct HostControllerCapabilities {
pub cap_length_and_version: HostControllerCapabilitiesLengthAndVersion, pub cap_length_and_version: HostControllerCapabilitiesLengthAndVersion,

View file

@ -1,8 +1,4 @@
use core::ptr::NonNull;
use alloc::{boxed::Box, vec::Vec};
use bitfield_struct::bitfield; use bitfield_struct::bitfield;
use volatile::VolatileRef;
/// The Doorbell Array is organized as an array of up to 256 Doorbell Registers. One /// The Doorbell Array is organized as an array of up to 256 Doorbell Registers. One
/// 32-bit Doorbell Register is defined in the array for each Device Slot. System /// 32-bit Doorbell Register is defined in the array for each Device Slot. System
@ -68,41 +64,18 @@ pub struct Doorbell {
db_stream_id: u16, db_stream_id: u16,
} }
#[repr(transparent)] impl Doorbell {
pub struct DoorbellPointer(VolatileRef<'static, Doorbell>);
impl DoorbellPointer {
// Construct a new doorbell pointer.
fn new(doorbell: NonNull<Doorbell>) -> Self {
// SAFETY:
// - We allocate this memory in the create
Self(unsafe { VolatileRef::new(doorbell) })
}
pub fn create_command_and_slots(
doorbell_physical: usize,
max_slots: u8,
) -> (Self, Box<[Self]>) {
// Add one for the command doorbell.
let doorbell_cnt = max_slots as usize + 1;
let doorbell_array_size = size_of::<Doorbell>() * doorbell_cnt;
let doorbells: NonNull<Doorbell> = NonNull::new(
mammoth::mem::map_direct_physical_and_leak(doorbell_physical, doorbell_array_size),
)
.unwrap();
let first = DoorbellPointer::new(doorbells);
let remainder = (1..=max_slots)
.map(|offset| {
// SAFETY: We just allocated the array of this size above.
DoorbellPointer::new(unsafe { doorbells.add(offset as usize) })
})
.collect();
(first, remainder)
}
pub fn ring(&mut self, target: u8) { pub fn ring(&mut self, target: u8) {
self.0 // SAFETY:
.as_mut_ptr() // - We know this is a valid reference.
.write(Doorbell::new().with_db_target(target)) unsafe {
core::ptr::write_volatile(
self as *mut _,
Doorbell::new().with_db_target(target).with_db_stream_id(0),
);
}
}
pub fn ring_command(&mut self) {
self.ring(0)
} }
} }

View file

@ -250,68 +250,7 @@ pub struct UsbStatus {
__: u32, __: u32,
} }
impl UsbStatus { #[bitfield(u32)]
// Returns a copy of this object that can be written without overwriting flags that are RW1C.
fn preserving_flags(&self) -> UsbStatus {
self.with_host_system_error(false)
.with_event_interrupt(false)
.with_port_change_detect(false)
.with_save_restore_error(false)
}
}
/// Internal data structure to ensure 64 bit reads and writes.
#[bitfield(u64)]
struct CommandAndStatus {
#[bits(32)]
usb_command: UsbCommand,
#[bits(32)]
usb_status: UsbStatus,
}
impl CommandAndStatus {
fn update_command(&self, f: impl Fn(UsbCommand) -> UsbCommand) -> CommandAndStatus {
CommandAndStatus::new()
.with_usb_command(f(self.usb_command()))
.with_usb_status(self.usb_status().preserving_flags())
}
fn update_status(&self, f: impl Fn(UsbStatus) -> UsbStatus) -> CommandAndStatus {
self.with_usb_status(f(self.usb_status()).preserving_flags())
}
}
#[bitfield(u64)]
struct PageSize {
/// Page Size RO. Default = Implementation defined. This field defines the page size supported by
/// the xHC implementation. This xHC supports a page size of 2^(n+12) if bit n is Set. For example, if
/// bit 0 is Set, the xHC supports 4k byte page sizes.
///
/// For a Virtual Function, this register reflects the page size selected in the System Page Size field
/// of the SR-IOV Extended Capability structure. For the Physical Function 0, this register reflects
/// the implementation dependent default xHC page size.
///
/// Various xHC resources reference PAGESIZE to describe their minimum alignment requirements.
///
/// The maximum possible page size is 128M.
#[bits(access=RO)]
page_size: u32,
__: u32,
}
#[bitfield(u64)]
struct DeviceNotificationControl {
__: u32,
/// This register is used by software to enable or disable the reporting of the
/// reception of specific USB Device Notification Transaction Packets. A Notification
/// Enable (Nx, where x = 0 to 15) flag is defined for each of the 16 possible device
/// notification types. If a flag is set for a specific notification type, a Device
/// Notification Event shall be generated when the respective notification packet is
/// received. After reset all notifications are disabled. Refer to section 6.4.2.7
device_notification_control: u32,
}
#[bitfield(u64)]
pub struct UsbConfigure { pub struct UsbConfigure {
/// Max Device Slots Enabled (MaxSlotsEn) RW. Default = 0. This field specifies the maximum /// Max Device Slots Enabled (MaxSlotsEn) RW. Default = 0. This field specifies the maximum
/// number of enabled Device Slots. Valid values are in the range of 0 to MaxSlots. Enabled Devices /// number of enabled Device Slots. Valid values are in the range of 0 to MaxSlots. Enabled Devices
@ -334,9 +273,6 @@ pub struct UsbConfigure {
#[bits(22)] #[bits(22)]
__: u32, __: u32,
// Pad to 64 bits for the purposes of reads and writes.
__: u32,
} }
/// XHCI Spec Section 5.4 /// XHCI Spec Section 5.4
@ -345,41 +281,44 @@ pub struct UsbConfigure {
/// Operational Base shall be Dword aligned and is calculated by adding the value /// Operational Base shall be Dword aligned and is calculated by adding the value
/// of the Capability Registers Length (CAPLENGTH) register (refer to Section 5.3.1) /// of the Capability Registers Length (CAPLENGTH) register (refer to Section 5.3.1)
/// to the Capability Base address. All registers are multiples of 32 bits in length /// to the Capability Base address. All registers are multiples of 32 bits in length
#[repr(C)] #[repr(C, packed)]
#[derive(Copy, Clone)] #[derive(Copy, Clone)]
pub struct HostControllerOperational { pub struct HostControllerOperational {
command_and_status: CommandAndStatus, pub usb_command: UsbCommand,
page_size: PageSize, pub usb_status: UsbStatus,
device_notification_control: DeviceNotificationControl, pub page_size: u32,
/// Bit 0: Ring Cycle State (RW) __: u32,
___: u32,
pub device_notification_control: u32,
/// Bit 0: Ring Cycle State (RO)
/// Bit 1: Command Stop (RW1S) /// Bit 1: Command Stop (RW1S)
/// Bit 2: Command Abort (RW1S) /// Bit 2: Command Abort (RW1S)
/// Bit 3: Command Ring Running (RO) /// Bit 3: Command Ring Running (RO)
command_ring_control: u64, pub command_ring_control: u64,
__: u64, ____: u64,
___: u64, _____: u64,
/// The Device Context Base Address Array Pointer Register identifies the base /// The Device Context Base Address Array Pointer Register identifies the base
/// address of the Device Context Base Address Array. /// address of the Device Context Base Address Array.
/// The memory structure referenced by this physical memory pointer is assumed to /// The memory structure referenced by this physical memory pointer is assumed to
/// be physically contiguous and 64-byte aligned. /// be physically contiguous and 64-byte aligned.
device_context_base_address_array_pointer: u64, pub device_context_base_address_array_pointer: u64,
configure: UsbConfigure, pub configure: UsbConfigure,
} }
const _: () = assert!(size_of::<HostControllerOperational>() == 0x40); const _: () = assert!(size_of::<HostControllerOperational>() == 0x3C);
pub struct HostControllerOperationalWrapper { pub struct HostControllerOperationalWrapper {
// TODO: Fix alignment of this type so we can do more targeted reads and writes.
operational: Mutex<VolatileRef<'static, HostControllerOperational>>, operational: Mutex<VolatileRef<'static, HostControllerOperational>>,
// TODO: This should maybe be its own structure. // TODO: This should maybe be its own structure.
ports: Vec<Mutex<VolatileRef<'static, HostControllerUsbPort>>>, ports: Vec<Mutex<VolatileRef<'static, HostControllerUsbPort>>>,
} }
#[allow(dead_code)]
impl HostControllerOperationalWrapper { impl HostControllerOperationalWrapper {
pub fn new(mmio_address: usize) -> (Self, HostControllerCapabilities) { pub fn new(mmio_address: usize) -> (Self, HostControllerCapabilities) {
const MAP_SIZE: usize = 0x1000; const MAP_SIZE: usize = 0x1000;
let caps_ptr: *mut HostControllerCapabilities = let caps_ptr: *mut HostControllerCapabilities =
map_direct_physical_and_leak(mmio_address, MAP_SIZE); map_direct_physical_and_leak(mmio_address as u64, MAP_SIZE as u64);
// SAFETY: // SAFETY:
// - The pointer is valid. // - The pointer is valid.
@ -392,11 +331,7 @@ impl HostControllerOperationalWrapper {
.read() .read()
}; };
assert!( let cap_length_and_version = capabilities.cap_length_and_version;
capabilities.cap_params_1.supports_64_bit(),
"We only support 64 bit XHCI"
);
// TODO: I don't think we actually handle this properly. // TODO: I don't think we actually handle this properly.
// SAFETY: XHCI Spec says that this resides in a single page of memory which we mapped // SAFETY: XHCI Spec says that this resides in a single page of memory which we mapped
// above. // above.
@ -404,7 +339,6 @@ impl HostControllerOperationalWrapper {
// BAR0 Size Allocation // BAR0 Size Allocation
// If virtualization is supported, the Capability and Operational Register sets, and // If virtualization is supported, the Capability and Operational Register sets, and
// the Extended Capabilities may reside in a single page of virtual memory, // the Extended Capabilities may reside in a single page of virtual memory,
let cap_length_and_version = capabilities.cap_length_and_version;
let operational_ptr = unsafe { let operational_ptr = unsafe {
(caps_ptr as *mut u8).add(cap_length_and_version.cap_length() as usize) (caps_ptr as *mut u8).add(cap_length_and_version.cap_length() as usize)
as *mut HostControllerOperational as *mut HostControllerOperational
@ -416,15 +350,15 @@ impl HostControllerOperationalWrapper {
let ports_addr = unsafe { (operational_ptr as *mut u8).add(PORT_OFFSET) as usize }; let ports_addr = unsafe { (operational_ptr as *mut u8).add(PORT_OFFSET) as usize };
let ports_space = MAP_SIZE - cap_length_and_version.cap_length() as usize - PORT_OFFSET; let ports_space = MAP_SIZE - cap_length_and_version.cap_length() as usize - PORT_OFFSET;
let max_ports_we_support = ports_space / size_of::<HostControllerUsbPort>(); let max_ports_we_support = ports_space / size_of::<HostControllerUsbPort>();
let max_ports = capabilities.params_1.max_ports(); let params_1 = capabilities.params_1;
assert!( assert!(
max_ports as usize <= max_ports_we_support, params_1.max_ports() as usize <= max_ports_we_support,
"TODO: Support more ports." "TODO: Support more ports."
); );
let mut ports = Vec::new(); let mut ports = Vec::new();
let ports_addr = ports_addr as *mut HostControllerUsbPort; let ports_addr = ports_addr as *mut HostControllerUsbPort;
for port_index in 0..max_ports { for port_index in 0..params_1.max_ports() {
ports.push(unsafe { ports.push(unsafe {
Mutex::new(VolatileRef::new( Mutex::new(VolatileRef::new(
NonNull::new(ports_addr.add(port_index as usize)).unwrap(), NonNull::new(ports_addr.add(port_index as usize)).unwrap(),
@ -442,53 +376,16 @@ impl HostControllerOperationalWrapper {
(operational, capabilities) (operational, capabilities)
} }
pub fn read_command(&self) -> UsbCommand { pub fn update(&self, f: impl Fn(HostControllerOperational) -> HostControllerOperational) {
let locked = self.operational.lock(); self.operational.lock().as_mut_ptr().update(f);
let op = locked.as_ptr();
map_field!(op.command_and_status).read().usb_command()
} }
pub fn update_command(&self, f: impl Fn(UsbCommand) -> UsbCommand) { pub fn status(&self) -> UsbStatus {
let mut locked = self.operational.lock(); self.operational.lock().as_ptr().read().usb_status
let op = locked.as_mut_ptr();
map_field!(op.command_and_status).update(|c_and_s| c_and_s.update_command(f));
} }
pub fn read_status(&self) -> UsbStatus { pub fn command(&self) -> UsbCommand {
let locked = self.operational.lock(); self.operational.lock().as_ptr().read().usb_command
let op = locked.as_ptr();
map_field!(op.command_and_status).read().usb_status()
}
pub fn update_status(&self, f: impl Fn(UsbStatus) -> UsbStatus) {
let mut locked = self.operational.lock();
let op = locked.as_mut_ptr();
map_field!(op.command_and_status).update(|c_and_s| c_and_s.update_status(f));
}
pub fn set_device_context_base_address_array_pointer(&self, pointer: usize) {
let mut locked = self.operational.lock();
let op = locked.as_mut_ptr();
map_field!(op.device_context_base_address_array_pointer).write(pointer as u64);
}
pub fn set_command_ring_dequeue_pointer(&self, pointer: usize, cycle_bit: bool) {
// TODO: Assert that the command ring is not running here.
let mut locked = self.operational.lock();
let op = locked.as_mut_ptr();
map_field!(op.command_ring_control).write(pointer as u64 | cycle_bit as u64);
}
pub fn read_configure(&self) -> UsbConfigure {
let locked = self.operational.lock();
let op = locked.as_ptr();
map_field!(op.configure).read()
}
pub fn update_configure(&self, f: impl Fn(UsbConfigure) -> UsbConfigure) {
let mut locked = self.operational.lock();
let op = locked.as_mut_ptr();
map_field!(op.configure).update(f);
} }
pub fn get_port(&self, index: usize) -> HostControllerUsbPort { pub fn get_port(&self, index: usize) -> HostControllerUsbPort {

View file

@ -3,12 +3,7 @@ use core::task::{Poll, Waker};
use alloc::{collections::vec_deque::VecDeque, sync::Arc, vec::Vec}; use alloc::{collections::vec_deque::VecDeque, sync::Arc, vec::Vec};
use mammoth::sync::Mutex; use mammoth::sync::Mutex;
use crate::xhci::{ use crate::xhci::data_structures::{TransferRequestBlock, TrbLink, TrbRingSegment, TypedTrb};
data_structures::{
TransferRequestBlock, TrbCommandCompletion, TrbLink, TrbRingSegment, TypedTrb,
},
registers::DoorbellPointer,
};
struct TrbFutureState<T> { struct TrbFutureState<T> {
/// Physical Address for the enqueued TRB. /// Physical Address for the enqueued TRB.
@ -147,7 +142,7 @@ impl<T: TypedTrb> TrbRing<T> {
} }
} }
pub fn handle_completion(&mut self, completion_trb: T) { pub fn handle_commpletion(&mut self, completion_trb: T) {
let trb = completion_trb.to_trb(); let trb = completion_trb.to_trb();
let paddr = trb.parameter() as usize; let paddr = trb.parameter() as usize;
let completion = self.pending_futures.pop_front().unwrap(); let completion = self.pending_futures.pop_front().unwrap();
@ -166,25 +161,3 @@ impl<T: TypedTrb> TrbRing<T> {
} }
} }
} }
pub struct CommandRing {
pub trb_ring: TrbRing<TrbCommandCompletion>,
doorbell: DoorbellPointer,
}
impl CommandRing {
pub fn new(doorbell: DoorbellPointer) -> Self {
Self {
trb_ring: TrbRing::new(),
doorbell,
}
}
// We have to explicitly return a future here
pub fn enqueue_command(&mut self, command: impl TypedTrb) -> TrbFuture<TrbCommandCompletion> {
let fut = self.trb_ring.enqueue_trb(command.to_trb());
// Command Doorbell is always 0.
self.doorbell.ring(0);
fut
}
}