Rust XHCI Implementation.

Drew 2025-12-05 22:01:13 -08:00
parent da2eb4fda3
commit c1ab41bbad
19 changed files with 933 additions and 110 deletions

rust/Cargo.lock (generated)
View file

@@ -2,12 +2,6 @@
# It is not intended for manual editing.
version = 4
-[[package]]
-name = "autocfg"
-version = "1.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
[[package]]
name = "bitfield-struct"
version = "0.8.0"
@@ -80,11 +74,10 @@ dependencies = [
[[package]]
name = "lock_api"
-version = "0.4.12"
+version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
+checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965"
dependencies = [
-"autocfg",
"scopeguard",
]
@@ -105,9 +98,9 @@ dependencies = [
[[package]]
name = "prettyplease"
-version = "0.2.20"
+version = "0.2.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e"
+checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b"
dependencies = [
"proc-macro2",
"syn",
@@ -115,18 +108,18 @@ dependencies = [
[[package]]
name = "proc-macro2"
-version = "1.0.86"
+version = "1.0.103"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
-version = "1.0.36"
+version = "1.0.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7"
+checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f"
dependencies = [
"proc-macro2",
]
@@ -148,9 +141,9 @@ dependencies = [
[[package]]
name = "syn"
-version = "2.0.72"
+version = "2.0.111"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af"
+checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87"
dependencies = [
"proc-macro2",
"quote",
@@ -178,15 +171,15 @@ dependencies = [
[[package]]
name = "unicode-ident"
-version = "1.0.12"
+version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
+checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5"
[[package]]
name = "unicode-segmentation"
-version = "1.11.0"
+version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202"
+checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493"
[[package]]
name = "victoriafalls"
@@ -200,12 +193,21 @@ dependencies = [
"yunqc",
]
+[[package]]
+name = "volatile"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af8ca9a5d4debca0633e697c88269395493cebf2e10db21ca2dbde37c1356452"
[[package]]
name = "voyageurs"
version = "0.1.0"
dependencies = [
"bitfield-struct 0.12.1",
"mammoth",
+"pci",
+"volatile",
+"yellowstone-yunq",
]
[[package]]

View file

@@ -78,6 +78,10 @@ impl MemoryRegion {
})
}
+pub fn vaddr(&self) -> usize {
+self.virt_addr as usize
+}
pub fn slice<T>(&self) -> &[T] {
unsafe {
slice::from_raw_parts(
@@ -246,11 +250,12 @@ pub fn map_cap_and_leak(mem_cap: Capability) -> u64 {
vaddr
}
-pub fn map_direct_physical_and_leak(paddr: u64, size: u64) -> u64 {
-let mem_cap = syscall::memory_object_direct_physical(paddr, size).unwrap();
+pub fn map_direct_physical_and_leak<T>(paddr: usize, size: usize) -> NonNull<T> {
+let mem_cap = syscall::memory_object_direct_physical(paddr as u64, size as u64).unwrap();
let vaddr = syscall::address_space_map(&mem_cap).unwrap();
mem_cap.release();
-vaddr
+// UNWRAP: The kernel guarantees this is valid.
+NonNull::new(vaddr as *mut T).unwrap()
}
pub fn map_physical_and_leak(size: u64) -> (u64, u64) {
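A usage sketch of the new typed signature, since map_direct_physical_and_leak now returns NonNull<T> instead of a raw u64 address (the register struct and address below are illustrative, not from this commit):

use core::ptr::NonNull;

#[repr(C)]
struct MmioBlock {
    status: u32,
    control: u32,
}

fn map_hypothetical_device() -> NonNull<MmioBlock> {
    // The typed return value removes the caller-side cast to *mut T.
    mammoth::mem::map_direct_physical_and_leak(0xFED0_0000, 0x1000)
}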

View file

@@ -4,17 +4,34 @@ use core::{
ptr::NonNull,
};
-use alloc::{slice, vec::Vec};
+use alloc::{boxed::Box, slice, vec::Vec};
use crate::mem::MemoryRegion;
pub struct PhysicalBox<T: ?Sized> {
data: NonNull<T>,
+#[allow(dead_code)]
region: MemoryRegion,
physical_address: usize,
_marker: PhantomData<T>,
}
+impl<T> PhysicalBox<T> {
+pub fn new(data: T) -> Self {
+let (memory_region, paddr) =
+MemoryRegion::contiguous_physical(size_of::<T>() as u64).expect("Failed to allocate");
+// UNWRAP: We know this isn't null.
+let ptr = NonNull::new(memory_region.mut_ptr_at_offset(0)).unwrap();
+unsafe { ptr.write(data) };
+Self {
+data: ptr,
+region: memory_region,
+physical_address: paddr as usize,
+_marker: PhantomData,
+}
+}
+}
impl<T: ?Sized> PhysicalBox<T> {
pub fn physical_address(&self) -> usize {
self.physical_address
@@ -50,7 +67,7 @@ impl<T> PhysicalBox<[T]> {
{
let layout = core::alloc::Layout::array::<T>(len).expect("Layout overflow");
// TODO: Implement a function like alloc that takes a layout.
let (memory_region, paddr) =
MemoryRegion::contiguous_physical(layout.size() as u64).expect("Failed to allocate");
@@ -122,6 +139,13 @@ where
}
}
+/// SAFETY: We are the only owner of this pointer.
+unsafe impl<T: ?Sized> Send for PhysicalBox<T> where Box<T>: Send {}
+/// SAFETY: You must have a mutable reference to this
+/// type to modify the data at the pointer.
+unsafe impl<T: ?Sized> Sync for PhysicalBox<T> where Box<T>: Sync {}
impl<T: ?Sized> Drop for PhysicalBox<T> {
fn drop(&mut self) {
// SAFETY:
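A minimal usage sketch of the new PhysicalBox::new constructor, assuming the mammoth API as shown above:

use mammoth::physical_box::PhysicalBox;

fn demo() -> usize {
    // Place a value in physically contiguous memory.
    let boxed = PhysicalBox::new(0u64);
    // The physical address can be handed to DMA-capable hardware while the
    // value stays accessible through the box on the CPU side.
    boxed.physical_address()
}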

View file

@@ -125,6 +125,7 @@ impl Executor {
}
}
+#[derive(Clone)]
pub struct Spawner {
tasks: Arc<Mutex<BTreeMap<TaskId, Task>>>,
task_queue: Arc<Mutex<VecDeque<TaskId>>>,

View file

@@ -72,15 +72,15 @@ impl PciDevice {
control.capable_address_64(),
"We don't handle the non-64bit case for MSI yet."
);
-assert!(
-control.multi_message_capable() == 0,
-"We don't yet handle multi-message capable devices."
-);
+if control.multi_message_capable() != 0 {
+mammoth::debug!("WARN: We don't yet handle multi-message capable devices.");
+}
// FIXME: These probably need to be volatile writes.
let header: &mut PciDeviceHeader = self.memory_region.as_mut();
header.command = header.command.with_interrupt_disable(true);
-msi_cap.msi_control = control.with_msi_enable(true);
+msi_cap.msi_control = control.with_msi_enable(true).with_multi_message_enable(0);
// For setting addr and data field, see intel ref
// Vol 3. Section 11.11
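For reference, the encoding that comment points at: per Intel SDM Vol. 3, Section 11.11, the MSI message address selects the target LAPIC and the message data carries the vector. A hedged sketch of the two encodings (the helper names are illustrative, not from this driver):

// MSI Message Address (Intel SDM Vol. 3, 11.11.1): fixed 0xFEE prefix in
// bits 31:20, destination APIC ID in bits 19:12; RH (bit 3) and DM (bit 2)
// left 0 for physical, directed delivery.
fn msi_message_address(apic_id: u8) -> u32 {
    0xFEE0_0000 | ((apic_id as u32) << 12)
}

// MSI Message Data (11.11.2): vector in bits 7:0; delivery mode bits 10:8
// left 0 (Fixed) and trigger mode bit 15 left 0 (edge).
fn msi_message_data(vector: u8) -> u32 {
    vector as u32
}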

View file

@@ -1,3 +1,5 @@
+use core::ffi::c_void;
+
use alloc::sync::Arc;
use mammoth::{
cap::Capability,
@@ -26,8 +28,11 @@ impl AhciController {
pub fn new(pci_memory: Capability) -> Self {
let pci_device = PciDevice::from_cap(pci_memory).unwrap();
-let hba_vaddr =
-mem::map_direct_physical_and_leak(pci_device.header().bars[5] as u64, 0x1100);
+let hba_vaddr = mem::map_direct_physical_and_leak::<c_void>(
+pci_device.header().bars[5] as usize,
+0x1100,
+)
+.as_ptr() as u64;
let hba = unsafe { (hba_vaddr as *mut AhciHba).as_mut().unwrap() };
let mut controller = Self {
pci_device: Mutex::new(pci_device),

View file

@@ -6,3 +6,10 @@ edition = "2024"
[dependencies]
bitfield-struct = "0.12"
mammoth = { path = "../../lib/mammoth/" }
+pci = { path = "../../lib/pci" }
+volatile = "0.6.1"
+yellowstone-yunq = { version = "0.1.0", path = "../../lib/yellowstone" }
+
+[features]
+default = ["debug"]
+debug = []

View file

@@ -5,12 +5,47 @@ extern crate alloc;
mod xhci;
-use mammoth::{debug, define_entry, zion::z_err_t};
+use alloc::sync::Arc;
+use mammoth::{
+cap::Capability,
+debug, define_entry,
+sync::Mutex,
+task::{Executor, Task},
+zion::z_err_t,
+};
+use pci::PciDevice;
+use xhci::driver::XHCIDriver;
define_entry!();
#[unsafe(no_mangle)]
extern "C" fn main() -> z_err_t {
-debug!("In Voyageurs");
+#[cfg(feature = "debug")]
+debug!("Voyageurs Starting.");
+let yellowstone = yellowstone_yunq::from_init_endpoint();
+let xhci_info = yellowstone
+.get_xhci_info()
+.expect("Failed to get XHCI info from yellowstone.");
+let pci_device = PciDevice::from_cap(Capability::take(xhci_info.xhci_region)).unwrap();
+let xhci_driver = Arc::new(XHCIDriver::from_pci_device(pci_device));
+let executor = Arc::new(Mutex::new(Executor::new()));
+let driver_clone = xhci_driver.clone();
+let spawner = executor.clone().lock().new_spawner();
+let interrupt_thread = mammoth::thread::spawn(move || driver_clone.interrupt_loop(spawner));
+executor
+.clone()
+.lock()
+.spawn(Task::new(async move { xhci_driver.startup().await }));
+executor.clone().lock().run();
+interrupt_thread.join().unwrap();
0
}

View file

@@ -0,0 +1,104 @@
use alloc::boxed::Box;
use mammoth::physical_box::PhysicalBox;
use crate::xhci::{
data_structures::{
DeviceContext, EndpointContextFields, EndpointState, EndpointType, InputContext,
TRDequeuePointer, TrbTransferEvent,
},
registers::DoorbellPointer,
trb_ring::TrbRing,
};
struct DeviceContextBaseArray(PhysicalBox<[u64]>);
impl DeviceContextBaseArray {
pub fn new(max_slots: u8) -> Self {
Self(PhysicalBox::default_with_count(0, max_slots as usize + 1))
}
}
pub struct DeviceSlot {
device_context: PhysicalBox<DeviceContext>,
endpoint_0_transfer_ring: TrbRing<TrbTransferEvent>,
}
impl DeviceSlot {
fn new() -> Self {
Self {
device_context: PhysicalBox::new(DeviceContext::default()),
endpoint_0_transfer_ring: TrbRing::new(),
}
}
}
pub struct DeviceSlotManager {
device_context_base_array: DeviceContextBaseArray,
slots: Box<[Option<DeviceSlot>]>,
doorbells: Box<[DoorbellPointer]>,
}
impl DeviceSlotManager {
pub fn new(max_slots: u8, doorbells: Box<[DoorbellPointer]>) -> Self {
assert!(
doorbells.len() == max_slots as usize,
"Got an incorrect doorbell slice size."
);
Self {
device_context_base_array: DeviceContextBaseArray::new(max_slots),
slots: core::iter::repeat_with(|| None)
.take(max_slots as usize)
.collect(),
doorbells,
}
}
pub fn device_context_base_array_physical_address(&self) -> usize {
self.device_context_base_array.0.physical_address()
}
/// Prepares a slot and an input context for an address device command.
///
/// Follows section 4.6.5 of the XHCI spec.
pub fn prep_slot_for_address_device(
&mut self,
slot_id: u8,
port_number: u8,
) -> PhysicalBox<InputContext> {
// TODO: Ensure alignment
let device_slot = DeviceSlot::new();
let mut input_context = PhysicalBox::new(InputContext::default());
// The Add Context flags for the Slot Context and the Endpoint 0 Context shall be set to 1.
input_context.input_control_context.add_context_flags = 0x3;
// See XHCI 4.5.2 for information
input_context.slot_context.fields = input_context
.slot_context
.fields
.with_root_hub_port_number(port_number)
.with_route_string(0)
.with_context_entries(1)
.with_interrupter_target(0);
// The Endpoint 0 Context data structure in the
// Input Context shall define valid values for the TR Dequeue Pointer, EP Type, Error
// Count (CErr), and Max Packet Size fields. The MaxPStreams, Max Burst Size, and
// EP State values shall be cleared to '0'
input_context.endpoint_context_0.tr_deque_pointer = TRDequeuePointer::new()
.with_pointer(device_slot.endpoint_0_transfer_ring.physical_base_address() as u64)
.with_dequeue_cycle_state(true);
input_context.endpoint_context_0.fields = EndpointContextFields::new()
.with_endpoint_type(EndpointType::Control)
.with_max_primary_streams(0)
.with_max_burst_size(0)
.with_endpoint_state(EndpointState::Disabled);
self.device_context_base_array.0[slot_id as usize] =
device_slot.device_context.physical_address() as u64;
self.slots[slot_id as usize - 1] = Some(device_slot);
input_context
}
}
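A note on the indexing at the bottom of prep_slot_for_address_device: slot IDs are 1-based and DCBAA entry 0 is reserved (it points at the scratchpad buffer array when scratchpads are in use), so the context pointer lands at device_context_base_array[slot_id] while the zero-based slots vector uses slot_id - 1. A standalone sketch of that correspondence, with hypothetical values:

fn indexing_sketch() {
    let max_slots = 8usize;
    // DCBAA: max_slots + 1 entries because entry 0 is reserved.
    let mut dcbaa = vec![0u64; max_slots + 1];
    // Driver-side bookkeeping: zero-based, one entry per slot.
    let mut slots: Vec<Option<u32>> = vec![None; max_slots];
    let slot_id = 3usize; // 1-based ID returned by an Enable Slot command.
    dcbaa[slot_id] = 0x1234_0000; // Hypothetical device context physical address.
    slots[slot_id - 1] = Some(0); // The same slot in the zero-based array.
}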

View file

@@ -0,0 +1,276 @@
use alloc::boxed::Box;
use alloc::sync::Arc;
use mammoth::cap::Capability;
use mammoth::sync::Mutex;
use mammoth::task::Spawner;
use mammoth::task::Task;
use super::registers::{self};
use crate::xhci::data_structures::CommandCompletionCode;
use crate::xhci::data_structures::TransferRequestBlock;
use crate::xhci::data_structures::TrbAddressDeviceCommand;
use crate::xhci::data_structures::TrbCommandCompletion;
use crate::xhci::data_structures::TrbEnableSlotCommand;
use crate::xhci::data_structures::TrbNoOp;
use crate::xhci::data_structures::TrbPortStatusChangeEvent;
use crate::xhci::data_structures::TrbType;
use crate::xhci::data_structures::TypedTrb;
use crate::xhci::device_context_base_array::DeviceSlotManager;
use crate::xhci::interrupter::Interrupter;
use crate::xhci::registers::DoorbellPointer;
use crate::xhci::registers::HostControllerOperationalWrapper;
use crate::xhci::registers::InterrupterRegisterSet;
use crate::xhci::registers::PortStatusAndControl;
use crate::xhci::trb_ring::CommandRing;
pub struct XHCIDriver {
#[allow(dead_code)]
pci_device: pci::PciDevice,
capabilities: registers::HostControllerCapabilities,
operational: HostControllerOperationalWrapper,
command_ring: Mutex<CommandRing>,
// TODO: Add multiple interrupters.
interrupter: Mutex<Interrupter>,
device_slot_manager: Mutex<DeviceSlotManager>,
}
impl XHCIDriver {
pub fn from_pci_device(mut pci_device: pci::PciDevice) -> Self {
let address =
((pci_device.header().bars[1] as usize) << 32) | (pci_device.header().bars[0] as usize);
let irq_port_cap = pci_device.register_msi().unwrap();
let (operational, capabilities) = HostControllerOperationalWrapper::new(address as usize);
let max_slots = capabilities.params_1.max_device_slots();
let doorbell_physical = address + capabilities.doorbell_offset as usize;
let (command_doorbell, slot_doorbells) =
DoorbellPointer::create_command_and_slots(doorbell_physical, max_slots);
// Offset to skip the mfindex register.
let interrupter_registers = mammoth::mem::map_direct_physical_and_leak(
address + capabilities.runtime_register_space_offset as usize,
size_of::<InterrupterRegisterSet>() * 2,
);
let interrupter_registers = unsafe { interrupter_registers.add(1) };
let mut driver = Self {
pci_device,
capabilities,
operational,
command_ring: Mutex::new(CommandRing::new(command_doorbell)),
interrupter: Mutex::new(Interrupter::new(interrupter_registers, irq_port_cap)),
device_slot_manager: Mutex::new(DeviceSlotManager::new(max_slots, slot_doorbells)),
};
driver.initialize();
driver
}
fn initialize(&mut self) {
#[cfg(feature = "debug")]
mammoth::debug!("Stopping XHCI Controller.");
// Stop the host controller.
self.operational
.update_command(|cmd| cmd.with_run_stop(false));
#[cfg(feature = "debug")]
mammoth::debug!("Waiting for controller to halt.");
// Sleep until the controller is halted.
let mut status = self.operational.read_status();
while !status.host_controller_halted() {
// TODO: Sleep for how long?
mammoth::syscall::thread_sleep(50).unwrap();
status = self.operational.read_status();
}
#[cfg(feature = "debug")]
mammoth::debug!("Resetting Controller.");
self.operational
.update_command(|cmd| cmd.with_host_controller_reset(true));
let mut command: registers::UsbCommand = self.operational.read_command();
while command.host_controller_reset() {
// TODO: Sleep for how long?
mammoth::syscall::thread_sleep(50).unwrap();
command = self.operational.read_command();
}
#[cfg(feature = "debug")]
mammoth::debug!("XHCI Controller Reset, waiting ready.");
let mut status = self.operational.read_status();
while status.controller_not_ready() {
// TODO: Sleep for how long?
mammoth::syscall::thread_sleep(50).unwrap();
status = self.operational.read_status();
}
#[cfg(feature = "debug")]
mammoth::debug!("XHCI Controller Ready.");
#[cfg(feature = "debug")]
mammoth::debug!("Setting Command Ring");
self.operational.set_command_ring_dequeue_pointer(
self.command_ring.lock().trb_ring.physical_base_address(),
true,
);
#[cfg(feature = "debug")]
mammoth::debug!("Setting DCBA.");
self.operational
.set_device_context_base_address_array_pointer(
self.device_slot_manager
.lock()
.device_context_base_array_physical_address(),
);
// We tell the controller that we support as many slots as it reports: the
// DCBA gets a full 4K page, which comfortably holds the maximum of 256
// entries (255 device slots plus the reserved entry 0).
self.operational.update_configure(|cfg| {
cfg.with_max_device_slots_enabled(self.capabilities.params_1.max_device_slots())
});
assert!(
self.capabilities.params_2.max_scratchpad_buffers() == 0,
"Unsupported scratchpad buffers."
);
#[cfg(feature = "debug")]
mammoth::debug!("Resetting event ring.");
// SAFETY: The HC is stopped.
unsafe { self.interrupter.lock().reset() };
self.operational
.update_command(|cmd| cmd.with_run_stop(true).with_interrupter_enable(true));
#[cfg(feature = "debug")]
mammoth::debug!("Enabled interrupts and controller.");
}
pub fn interrupt_loop(self: Arc<Self>, spawner: Spawner) {
let completion_handler = |trb: TransferRequestBlock| {
self.clone().handle_completion(spawner.clone(), trb);
};
self.interrupter.lock().interrupt_loop(completion_handler);
}
fn handle_completion(self: Arc<XHCIDriver>, spawner: Spawner, trb: TransferRequestBlock) {
match trb.trb_type() {
TrbType::TransferEvent => {
todo!("Handle Transfer")
}
TrbType::CommandCompletionEvent => {
self.command_ring
.lock()
.trb_ring
.handle_completion(TrbCommandCompletion::from_trb(trb));
}
TrbType::PortStatusChangeEvent => {
let trb = TrbPortStatusChangeEvent::from_trb(trb);
let self_clone = self.clone();
spawner.spawn(Task::new(async move {
self_clone.port_status_change(trb).await
}));
}
_ => {
panic!("Unhandled event type: {:?}", trb.trb_type());
}
}
}
async fn send_command(&self, trb: impl TypedTrb) -> TrbCommandCompletion {
// Split the future and the await so the lock is dropped before we await.
let future = { self.command_ring.lock().enqueue_command(trb) };
future.await
}
pub async fn startup(&self) {
#[cfg(feature = "debug")]
mammoth::debug!("Sending no op command.");
let result = self.send_command(TrbNoOp::new()).await;
assert!(result.completion_code() == CommandCompletionCode::Success.into_bits());
#[cfg(feature = "debug")]
mammoth::debug!("Successfully tested no op command.");
#[cfg(feature = "debug")]
mammoth::debug!("Resetting all connected ports.");
for port_index in 0..self.operational.num_ports() {
self.operational
.update_port_status(port_index, |p| p.clear_change_bits());
}
for port_index in 0..self.operational.num_ports() {
let status = self.operational.get_port(port_index).status_and_control;
if status.port_power() && status.current_connect_status() {
mammoth::debug!("Resetting port {}", port_index);
self.operational.update_port_status(port_index, |_| {
PortStatusAndControl::new()
.with_port_reset(true)
.with_port_power(true)
});
}
}
}
async fn port_status_change(self: Arc<Self>, status_change: TrbPortStatusChangeEvent) {
// Ports are indexed from 1.
let port_id = status_change.port_id();
let port_index = (port_id - 1) as usize;
let port_status = self
.operational
.get_port(port_index as usize)
.status_and_control;
#[cfg(feature = "debug")]
mammoth::debug!("Port status change for port {}", port_id);
if !port_status.port_reset_change() {
mammoth::debug!(
"Unknown port status event, not handling. status= {:?}",
port_status
);
return;
}
self.operational
.update_port_status(port_index, |s| s.clear_change_bits());
#[cfg(feature = "debug")]
mammoth::debug!("Enabling slot.");
let resp = self.send_command(TrbEnableSlotCommand::new()).await;
assert!(resp.completion_code() == CommandCompletionCode::Success.into_bits());
let slot = resp.slot_id();
#[cfg(feature = "debug")]
mammoth::debug!("Creating slot data structures in slot {}.", slot);
let input_context = self
.device_slot_manager
.lock()
.prep_slot_for_address_device(slot, port_id);
#[cfg(feature = "debug")]
mammoth::debug!("Sending address device.");
let resp = self
.send_command(
TrbAddressDeviceCommand::new()
.with_slot_id(slot)
.with_input_context_pointer(input_context.physical_address() as u64),
)
.await;
assert!(resp.completion_code() == CommandCompletionCode::Success.into_bits());
}
}
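A detail worth spelling out from send_command above: the command ring Mutex must not be held across the .await, because the completion that resolves the future is delivered by interrupt_loop on a separate thread, and handle_completion needs the same lock. A minimal sketch of the pattern, using the types from this file:

// Scope the guard to the enqueue; only the returned future crosses the await.
// Holding the guard across the await would block handle_completion() on the
// interrupt thread and deadlock the driver.
async fn send_command_sketch(
    ring: &mammoth::sync::Mutex<CommandRing>,
    trb: impl TypedTrb,
) -> TrbCommandCompletion {
    let future = { ring.lock().enqueue_command(trb) };
    future.await
}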

View file

@@ -0,0 +1,70 @@
use alloc::vec::Vec;
use crate::xhci::{
data_structures::{EventRingSegmentTable, TransferRequestBlock, TrbRingSegment},
trb_ring::TrbPointer,
};
pub struct EventRing {
segment_table: EventRingSegmentTable,
segments: Vec<TrbRingSegment>,
cycle_bit: bool,
trb_pointer: TrbPointer,
}
impl EventRing {
pub fn new() -> Self {
// Software maintains an Event Ring Consumer Cycle State (CCS) bit, initializing it
// to 1...
let cycle_bit = true;
let mut event_ring = Self {
segment_table: EventRingSegmentTable::new(1),
segments: [TrbRingSegment::new(100)].into(),
cycle_bit,
trb_pointer: TrbPointer::default(),
};
event_ring.segment_table[0].from_trb_ring(&event_ring.segments[0]);
event_ring
}
pub fn segment_table(&self) -> &EventRingSegmentTable {
&self.segment_table
}
pub fn erdp_physical_address(&self) -> usize {
self.segments[self.trb_pointer.segment_index].physical_address()
+ self.trb_pointer.segment_physical_offset()
}
fn current_trb(&self) -> TransferRequestBlock {
// TODO: These should be volatile reads.
self.segments[self.trb_pointer.segment_index][self.trb_pointer.segment_offset]
}
fn increment_pointer(&mut self) {
self.trb_pointer.segment_offset += 1;
if self.trb_pointer.segment_offset == self.segments[self.trb_pointer.segment_index].len() {
self.trb_pointer.segment_index += 1;
self.trb_pointer.segment_offset = 0;
if self.trb_pointer.segment_index == self.segments.len() {
// Wrap around to front.
self.trb_pointer.segment_index = 0;
self.cycle_bit = !self.cycle_bit;
}
}
}
pub fn get_next(&mut self) -> Option<TransferRequestBlock> {
let curr = self.current_trb();
if curr.cycle() != self.cycle_bit {
None
} else {
self.increment_pointer();
Some(curr)
}
}
}
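The cycle-bit handshake in get_next is the standard xHCI producer/consumer protocol: a TRB belongs to the consumer only while its cycle bit matches the consumer's cycle state (CCS), and the CCS flips each time the dequeue pointer wraps. A self-contained sketch of that logic on a plain array (the names and ring shape are illustrative):

// Each slot is (cycle_bit, payload). The consumer owns a slot only while
// its cycle bit equals the consumer cycle state (ccs).
fn next_event(ring: &[(bool, u32)], idx: &mut usize, ccs: &mut bool) -> Option<u32> {
    let (cycle, payload) = ring[*idx];
    if cycle != *ccs {
        return None; // Producer has not written this slot on this lap.
    }
    *idx += 1;
    if *idx == ring.len() {
        *idx = 0;
        *ccs = !*ccs; // After wrapping, last lap's entries read as stale.
    }
    Some(payload)
}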

View file

@@ -0,0 +1,64 @@
use core::ptr::NonNull;
use alloc::boxed::Box;
use mammoth::cap::Capability;
use crate::xhci::{
data_structures::TransferRequestBlock,
event_ring::EventRing,
registers::{InterrupterModeration, InterrupterRegisterSet, InterrupterRegisters},
};
pub struct Interrupter {
event_ring: EventRing,
register_set: InterrupterRegisters,
irq_port_cap: Capability,
}
impl Interrupter {
pub fn new(
interrupter_register_set: NonNull<InterrupterRegisterSet>,
irq_port_cap: Capability,
) -> Self {
Self {
event_ring: EventRing::new(),
register_set: InterrupterRegisters::new(interrupter_register_set),
irq_port_cap,
}
}
// SAFETY:
// - HC Must be halted for interrupter 0.
pub unsafe fn reset(&mut self) {
// SAFETY:
// - The segment table is size 1.
unsafe {
self.register_set.set_event_ring(
self.event_ring.segment_table(),
self.event_ring.erdp_physical_address(),
);
}
self.register_set.set_moderation(
InterrupterModeration::new()
.with_interrupt_moderation_interval(4000)
.with_interrupt_moderation_counter(0),
);
self.register_set.enable_interrupts();
}
pub fn interrupt_loop(&mut self, completion_handler: impl Fn(TransferRequestBlock) -> ()) {
loop {
let _ = mammoth::syscall::port_recv(&self.irq_port_cap, &mut [], &mut []).unwrap();
#[cfg(feature = "debug")]
mammoth::debug!("Received Interrupt.");
while let Some(trb) = self.event_ring.get_next() {
completion_handler(trb);
self.register_set
.update_dequeue_pointer_clearing_busy(self.event_ring.erdp_physical_address());
}
}
}
}
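On the moderation value used in reset above: per XHCI 5.5.2.2 the Interrupt Moderation Interval field counts in 250 ns units, so 4000 throttles the interrupter to at most one interrupt per millisecond. As arithmetic:

// IMODI is in 250 ns ticks (XHCI 5.5.2.2).
const IMOD_TICK_NS: u64 = 250;
const MIN_INTERRUPT_SPACING_NS: u64 = 4000 * IMOD_TICK_NS; // 1_000_000 ns = 1 ms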

View file

@@ -1,2 +1,7 @@
-pub mod data_structures;
-pub mod registers;
+mod data_structures;
+mod device_context_base_array;
+pub mod driver;
+mod event_ring;
+mod interrupter;
+mod registers;
+mod trb_ring;

View file

@@ -86,10 +86,8 @@ impl DoorbellPointer {
// Add one for the command doorbell.
let doorbell_cnt = max_slots as usize + 1;
let doorbell_array_size = size_of::<Doorbell>() * doorbell_cnt;
-let doorbells: NonNull<Doorbell> = NonNull::new(
-mammoth::mem::map_direct_physical_and_leak(doorbell_physical, doorbell_array_size),
-)
-.unwrap();
+let doorbells: NonNull<Doorbell> =
+mammoth::mem::map_direct_physical_and_leak(doorbell_physical, doorbell_array_size);
let first = DoorbellPointer::new(doorbells);
let remainder = (1..=max_slots)
.map(|offset| {

View file

@@ -378,19 +378,13 @@ pub struct HostControllerOperationalWrapper {
impl HostControllerOperationalWrapper {
pub fn new(mmio_address: usize) -> (Self, HostControllerCapabilities) {
const MAP_SIZE: usize = 0x1000;
-let caps_ptr: *mut HostControllerCapabilities =
+let caps_ptr: NonNull<HostControllerCapabilities> =
map_direct_physical_and_leak(mmio_address, MAP_SIZE);
// SAFETY:
// - The pointer is valid.
// - No other thread has access in this block.
-let capabilities = unsafe {
-VolatilePtr::new(
-// UNWRAP: We just constructed this object with a non-null value.
-NonNull::new(caps_ptr).unwrap(),
-)
-.read()
-};
+let capabilities = unsafe { VolatilePtr::new(caps_ptr).read() };
assert!(
capabilities.cap_params_1.supports_64_bit(),
@@ -406,7 +400,7 @@ impl HostControllerOperationalWrapper {
// the Extended Capabilities may reside in a single page of virtual memory,
let cap_length_and_version = capabilities.cap_length_and_version;
let operational_ptr = unsafe {
-(caps_ptr as *mut u8).add(cap_length_and_version.cap_length() as usize)
+(caps_ptr.as_ptr() as *mut u8).add(cap_length_and_version.cap_length() as usize)
as *mut HostControllerOperational
};

View file

@@ -1,4 +1,7 @@
+use core::ptr::NonNull;
+
use bitfield_struct::bitfield;
+use volatile::{VolatileRef, map_field};
use crate::xhci::data_structures::EventRingSegmentTable;
@@ -7,18 +10,18 @@ use crate::xhci::data_structures::EventRingSegmentTable;
///
/// XHCI 5.5.2.1
#[bitfield(u32)]
-pub struct InterrupterManagement {
+struct InterrupterManagement {
/// Interrupt Pending (IP) - RW1C. Default = 0. This flag represents the current state of the
/// Interrupter. If IP = 1, an interrupt is pending for this Interrupter. A 0 value indicates that no
/// interrupt is pending for the Interrupter. Refer to section 4.17.3 for the conditions that modify
/// the state of this flag.
-pub interrupt_pending: bool,
+interrupt_pending: bool,
/// Interrupt Enable (IE) RW. Default = 0. This flag specifies whether the Interrupter is capable of
/// generating an interrupt. When this bit and the IP bit are set (1), the Interrupter shall generate
/// an interrupt when the Interrupter Moderation Counter reaches 0. If this bit is 0, then the
/// Interrupter is prohibited from generating interrupts
-pub interrupt_enabled: bool,
+interrupt_enabled: bool,
#[bits(30)]
_reserved: u32,
@@ -45,14 +48,21 @@ pub struct InterrupterModeration {
pub interrupt_moderation_counter: u16,
}
-/// The Event Ring Segment Table Size Register defines the number of segments
-/// supported by the Event Ring Segment Table.
-///
-/// XHCI 5.5.2.3.1
-#[bitfield(u32)]
-pub struct EventRingSegmentTableSize {
-pub event_ring_segment_table_size: u16,
-_reserved: u16,
-}
+#[bitfield(u64)]
+struct ManagementAndModeration {
+#[bits(32)]
+management: InterrupterManagement,
+#[bits(32)]
+moderation: InterrupterModeration,
+}
+impl ManagementAndModeration {
+fn update_moderation(self, moderation: InterrupterModeration) -> Self {
+// Update preserving interrupt pending.
+self.with_management(self.management().with_interrupt_pending(false))
+.with_moderation(moderation)
+}
+}
/// The Event Ring Dequeue Pointer Register is written by software to define the
@@ -61,35 +71,46 @@ pub struct EventRingSegmentTableSize {
///
/// XHCI 5.5.2.3.3
#[bitfield(u64)]
-pub struct EventRingDequePointer {
+struct EventRingDequePointer {
/// Dequeue ERST Segment Index (DESI) RW. Default = 0. This field may be used by the xHC to
/// accelerate checking the Event Ring full condition. This field is written with the low order 3 bits of
/// the offset of the ERST entry which defines the Event Ring segment that the Event Ring Dequeue
/// Pointer resides in. Refer to section 6.5 for the definition of an ERST entry.
#[bits(3)]
-pub dequeue_erst_segment_index: u8,
+dequeue_erst_segment_index: u8,
/// Event Handler Busy (EHB) - RW1C. Default = 0. This flag shall be set to 1 when the IP bit is set
/// to 1 and cleared to 0 by software when the Dequeue Pointer register is written. Refer to
/// section 4.17.2 for more information
-pub event_handler_busy: bool,
+event_handler_busy: bool,
/// Event Ring Dequeue Pointer - RW. Default = 0. This field defines the high order bits of the 64-
/// bit address of the current Event Ring Dequeue Pointer
#[bits(60)]
-pub event_ring_dequeue_pointer: u64,
+event_ring_dequeue_pointer: u64,
}
-/// This is an array of registers starting at offset 0x20 of the Runtime Base.
-/// The Runtime Base shall be 32-byte aligned and is calculated by adding the
-/// value Runtime Register Space Offset register (refer to Section 5.3.8) to
-/// the Capability Base address. All Runtime registers are multiples of 32 bits in length.
-///
-/// XHCI Spec 5.5.2
-#[repr(C, packed)]
-pub struct InterrupterRegisterSet {
-pub interrupter_management: InterrupterManagement,
-pub interrupter_moderation: InterrupterModeration,
+impl EventRingDequePointer {
+fn with_dequeue_pointer_adjusted(self, event_ring_dequeue_pointer: usize) -> Self {
+assert!(
+event_ring_dequeue_pointer & 0b1111 == 0,
+"Bottom four bits of event ring dequeue pointer must be 0"
+);
+self.with_event_ring_dequeue_pointer((event_ring_dequeue_pointer >> 4) as u64)
+}
+fn clear_event_handler_busy(self) -> Self {
+// RW1C
+self.with_event_handler_busy(true)
+}
+}
+/// The Event Ring Segment Table Size Register defines the number of segments
+/// supported by the Event Ring Segment Table.
+///
+/// XHCI 5.5.2.3.1
+#[bitfield(u64)]
+struct EventRingSegmentTableSize {
/// Event Ring Segment Table Size RW. Default = 0. This field identifies the number of valid
/// Event Ring Segment Table entries in the Event Ring Segment Table pointed to by the Event Ring
/// Segment Table Base Address register. The maximum value supported by an xHC
@@ -100,8 +121,21 @@ pub struct InterrupterRegisterSet {
/// Ring.
/// For the Primary Interrupter: Writing a value of 0 to this field shall result in undefined behavior
/// of the Event Ring. The Primary Event Ring cannot be disabled.
-event_ring_segment_table_size: u32,
-___: u32,
+event_ring_segment_table_size: u16,
+__: u16,
+__: u32,
+}
+/// This is an array of registers starting at offset 0x20 of the Runtime Base.
+/// The Runtime Base shall be 32-byte aligned and is calculated by adding the
+/// value Runtime Register Space Offset register (refer to Section 5.3.8) to
+/// the Capability Base address. All Runtime registers are multiples of 32 bits in length.
+///
+/// XHCI Spec 5.5.2
+#[repr(C)]
+pub struct InterrupterRegisterSet {
+management_and_moderation: ManagementAndModeration,
+event_ring_segment_table_size: EventRingSegmentTableSize,
/// Event Ring Segment Table Base Address Register RW. Default = 0. This field defines the
/// high order bits of the start address of the Event Ring Segment Table.
/// Writing this register sets the Event Ring State Machine:EREP Advancement to the Start state.
@@ -113,10 +147,19 @@ pub struct InterrupterRegisterSet {
///
/// XHCI 5.5.2.3.2
event_ring_segment_table_base_address: u64,
-event_ring_deque_pointer: u64,
+event_ring_deque_pointer: EventRingDequePointer,
}
+const _: () = assert!(size_of::<InterrupterRegisterSet>() == 0x20);
+pub struct InterrupterRegisters(VolatileRef<'static, InterrupterRegisterSet>);
-impl InterrupterRegisterSet {
+impl InterrupterRegisters {
+pub fn new(interrupter_register_set: NonNull<InterrupterRegisterSet>) -> Self {
+// TODO: Validate safety.
+unsafe { Self(VolatileRef::new(interrupter_register_set)) }
+}
/// SAFETY:
/// - For the primary interrupter HC must be halted.
/// - The event rings size must be at most ERST_MAX from HCSPARAMS2
@@ -128,38 +171,36 @@ impl InterrupterRegisterSet {
// NOTE: We must write the size before the base address otherwise qemu is unhappy.
// Not sure if this is required by the spec.
-// SAFETY:
-// - We know this address is valid and we have a mut reference to it.
-unsafe {
-core::ptr::write_volatile(
-core::ptr::addr_of!(self.event_ring_segment_table_size) as *mut _,
-event_ring_segment_table.len() as u32,
-);
-core::ptr::write_volatile(
-core::ptr::addr_of!(self.event_ring_segment_table_base_address) as *mut _,
-event_ring_segment_table.physical_address(),
-);
-core::ptr::write_volatile(
-core::ptr::addr_of!(self.event_ring_deque_pointer) as *mut _,
-event_ring_dequeue_pointer,
-);
-}
-}
+let internal = self.0.as_mut_ptr();
+map_field!(internal.event_ring_segment_table_size).write(
+EventRingSegmentTableSize::new()
+.with_event_ring_segment_table_size(event_ring_segment_table.len() as u16),
+);
+map_field!(internal.event_ring_segment_table_base_address)
+.write(event_ring_segment_table.physical_address() as u64);
+map_field!(internal.event_ring_deque_pointer).write(
+EventRingDequePointer::new().with_dequeue_pointer_adjusted(event_ring_dequeue_pointer),
+);
+}
+pub fn update_dequeue_pointer_clearing_busy(&mut self, event_ring_dequeue_pointer: usize) {
+let internal = self.0.as_mut_ptr();
+map_field!(internal.event_ring_deque_pointer).update(|ptr| {
+ptr.with_dequeue_pointer_adjusted(event_ring_dequeue_pointer)
+.clear_event_handler_busy()
+});
+}
-pub fn update_dequeue_pointer(&mut self, event_ring_dequeue_pointer: usize) {
-// SAFETY:
-// - We know this address is valid and we have a mut pointer to it.
-unsafe {
-// TODO: Preserve lower bits, also update it to make it clear that we
-// are clearing the EHB bit.
-core::ptr::write_volatile(
-core::ptr::addr_of!(self.event_ring_deque_pointer) as *mut _,
-event_ring_dequeue_pointer | (1 << 3),
-)
-}
-}
-}
-const _: () = assert!(size_of::<InterrupterRegisterSet>() == 0x20);
+pub fn set_moderation(&mut self, moderation: InterrupterModeration) {
+let internal = self.0.as_mut_ptr();
+map_field!(internal.management_and_moderation)
+.update(|reg| reg.update_moderation(moderation));
+}
+pub fn enable_interrupts(&mut self) {
+let internal = self.0.as_mut_ptr();
+map_field!(internal.management_and_moderation).update(|reg| {
+reg.with_management(InterrupterManagement::new().with_interrupt_enabled(true))
+});
+}
+}
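The RW1C ("write 1 to clear") convention used by the IP and EHB flags above is easy to misread: software clears such a flag by writing a 1 to it, which is why clear_event_handler_busy sets the bit to true. A generic sketch of the hardware-side semantics:

// RW1C: on a write, bits set to 1 clear the corresponding status bits;
// bits written as 0 are left untouched.
fn rw1c_apply(status: u32, written: u32) -> u32 {
    status & !written
}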

View file

@@ -0,0 +1,190 @@
use core::task::{Poll, Waker};
use alloc::{collections::vec_deque::VecDeque, sync::Arc, vec::Vec};
use mammoth::sync::Mutex;
use crate::xhci::{
data_structures::{
TransferRequestBlock, TrbCommandCompletion, TrbLink, TrbRingSegment, TypedTrb,
},
registers::DoorbellPointer,
};
struct TrbFutureState<T> {
/// Physical Address for the enqueued TRB.
/// Used for sanity checking.
physical_address: usize,
waker: Option<Waker>,
response: Option<T>,
}
#[derive(Clone)]
pub struct TrbFuture<T: TypedTrb> {
state: Arc<Mutex<TrbFutureState<T>>>,
}
impl<T: TypedTrb> TrbFuture<T> {
fn new(paddr: usize) -> Self {
Self {
state: Arc::new(Mutex::new(TrbFutureState {
physical_address: paddr,
waker: None,
response: None,
})),
}
}
}
impl<T: TypedTrb> Future for TrbFuture<T> {
type Output = T;
fn poll(
self: core::pin::Pin<&mut Self>,
cx: &mut core::task::Context<'_>,
) -> core::task::Poll<Self::Output> {
let mut state = self.state.lock();
match state.response {
Some(trb) => Poll::Ready(trb),
None => {
state.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
}
#[derive(Default, Copy, Clone, Debug)]
pub struct TrbPointer {
/// Index into the vector of trb segments.
pub segment_index: usize,
/// Index into the specific segment.
/// This is a TransferRequestBlock index;
/// to get the physical offset, use segment_physical_offset().
pub segment_offset: usize,
}
impl TrbPointer {
pub fn segment_physical_offset(&self) -> usize {
self.segment_offset * size_of::<TransferRequestBlock>()
}
}
pub struct TrbRing<T: TypedTrb> {
segments: Vec<TrbRingSegment>,
enqueue_pointer: TrbPointer,
cycle_bit: bool,
pending_futures: VecDeque<TrbFuture<T>>,
}
impl<T: TypedTrb> TrbRing<T> {
pub fn new() -> Self {
Self {
// TODO: What size and count should this be?
segments: alloc::vec![TrbRingSegment::new(100)],
enqueue_pointer: TrbPointer::default(),
// Start with this as true so we are flipping bits from 0 (default) to 1
// to mark the enqueue pointer.
cycle_bit: true,
pending_futures: VecDeque::new(),
}
}
pub fn physical_base_address(&self) -> usize {
self.segments[0].physical_address()
}
fn physical_address_of_enqueue_pointer(&self) -> usize {
self.segments[self.enqueue_pointer.segment_index].physical_address()
+ self.enqueue_pointer.segment_physical_offset()
}
pub fn enqueue_trb(&mut self, trb: TransferRequestBlock) -> TrbFuture<T> {
let paddr = self.physical_address_of_enqueue_pointer();
*self.next_trb_ref() = trb.with_cycle(self.cycle_bit);
self.advance_enqueue_pointer();
let future = TrbFuture::new(paddr);
self.pending_futures.push_back(future.clone());
future
}
fn next_trb_ref(&mut self) -> &mut TransferRequestBlock {
&mut self.segments[self.enqueue_pointer.segment_index][self.enqueue_pointer.segment_offset]
}
fn advance_enqueue_pointer(&mut self) {
self.enqueue_pointer.segment_offset += 1;
if self.enqueue_pointer.segment_offset
== self.segments[self.enqueue_pointer.segment_index].len() - 1
{
// We have reached the end of the segment, insert a link trb.
// Increment the segment index with wrapping.
let next_segment_index =
if self.enqueue_pointer.segment_index + 1 == self.segments.len() {
0
} else {
self.enqueue_pointer.segment_index + 1
};
let next_segment_pointer = self.segments[next_segment_index].physical_address();
let toggle_cycle = next_segment_index == 0;
*self.next_trb_ref() = TrbLink::new()
.with_ring_segment_pointer(next_segment_pointer as u64)
.with_cycle(self.cycle_bit)
.with_toggle_cycle(toggle_cycle)
.to_trb();
// Flip toggle cycle bit if necessary.
self.cycle_bit ^= toggle_cycle;
self.enqueue_pointer = TrbPointer {
segment_index: next_segment_index,
segment_offset: 0,
};
}
}
pub fn handle_completion(&mut self, completion_trb: T) {
let trb = completion_trb.to_trb();
let paddr = trb.parameter() as usize;
let completion = self.pending_futures.pop_front().unwrap();
let mut completion = completion.state.lock();
// TODO: Handle recovery scenarios here.
assert!(
completion.physical_address == paddr,
"Got an unexpected command completion. Expected: {:0x}, Got: {:0x}",
completion.physical_address,
paddr
);
completion.response = Some(completion_trb);
if let Some(waker) = &completion.waker {
waker.wake_by_ref();
}
}
}
pub struct CommandRing {
pub trb_ring: TrbRing<TrbCommandCompletion>,
doorbell: DoorbellPointer,
}
impl CommandRing {
pub fn new(doorbell: DoorbellPointer) -> Self {
Self {
trb_ring: TrbRing::new(),
doorbell,
}
}
// We have to explicitly return a future here so the caller can drop the
// ring lock before awaiting it.
pub fn enqueue_command(&mut self, command: impl TypedTrb) -> TrbFuture<TrbCommandCompletion> {
let fut = self.trb_ring.enqueue_trb(command.to_trb());
// Command Doorbell is always 0.
self.doorbell.ring(0);
fut
}
}
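The producer-side cycle handling in advance_enqueue_pointer mirrors the consumer logic in the event ring: the last entry of each segment is reserved for a Link TRB, and Toggle Cycle is set only on the link that wraps back to segment 0, flipping the producer's cycle bit once per lap. A standalone sketch of the wrap arithmetic, assuming a single segment of SEGMENT_LEN entries:

// Sketch: one segment, last slot reserved for the Link TRB.
const SEGMENT_LEN: usize = 100;

fn advance(offset: usize, cycle: bool) -> (usize, bool) {
    let next = offset + 1;
    if next == SEGMENT_LEN - 1 {
        // Slot 99 holds the Link TRB back to slot 0 with Toggle Cycle set,
        // so the producer restarts at 0 with the cycle bit flipped.
        (0, !cycle)
    } else {
        (next, cycle)
    }
}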

View file

@@ -18,7 +18,7 @@ if [[ $1 == "debug" ]]; then
fi
# Use machine q35 to access PCI devices.
-qemu-system-x86_64 -machine q35 -d guest_errors -m 1G -serial stdio -hda ${BUILD_DIR}/disk.img ${QEMU_ARGS} -device nec-usb-xhci,id=xhci -device usb-kbd,bus=xhci.0
+~/.local/bin/qemu-system-x86_64 -machine q35 -d guest_errors -m 1G -serial stdio -hda ${BUILD_DIR}/disk.img ${QEMU_ARGS} -device nec-usb-xhci,id=xhci -device usb-kbd,bus=xhci.0
popd
# Extra options to add to this script in the future.

View file

@@ -18,6 +18,8 @@ void DriverManager::WriteMessage(uint64_t irq_num, IpcMessage&& message) {
return;
}
+dbgln("IRQ offset {x}", offset);
driver_list_[offset]->Send(glcr::Move(message));
}