diff --git a/rust/Cargo.lock b/rust/Cargo.lock
index aec4469..33e3fc5 100644
--- a/rust/Cargo.lock
+++ b/rust/Cargo.lock
@@ -2,12 +2,6 @@
 # It is not intended for manual editing.
 version = 4
 
-[[package]]
-name = "autocfg"
-version = "1.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
-
 [[package]]
 name = "bitfield-struct"
 version = "0.8.0"
@@ -80,11 +74,10 @@ dependencies = [
 
 [[package]]
 name = "lock_api"
-version = "0.4.12"
+version = "0.4.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
+checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965"
 dependencies = [
- "autocfg",
  "scopeguard",
 ]
 
@@ -105,9 +98,9 @@ dependencies = [
 [[package]]
 name = "prettyplease"
-version = "0.2.20"
+version = "0.2.37"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e"
+checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b"
 dependencies = [
  "proc-macro2",
  "syn",
 ]
@@ -115,18 +108,18 @@ dependencies = [
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.86"
+version = "1.0.103"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8"
 dependencies = [
  "unicode-ident",
 ]
 
 [[package]]
 name = "quote"
-version = "1.0.36"
+version = "1.0.42"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7"
+checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f"
 dependencies = [
  "proc-macro2",
 ]
@@ -148,9 +141,9 @@ dependencies = [
 
 [[package]]
 name = "syn"
-version = "2.0.72"
+version = "2.0.111"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af"
+checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -178,15 +171,15 @@ dependencies = [
 
 [[package]]
 name = "unicode-ident"
-version = "1.0.12"
+version = "1.0.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
+checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5"
 
 [[package]]
 name = "unicode-segmentation"
-version = "1.11.0"
+version = "1.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202"
+checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493"
 
 [[package]]
 name = "victoriafalls"
@@ -200,12 +193,21 @@ dependencies = [
  "yunqc",
 ]
 
+[[package]]
+name = "volatile"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af8ca9a5d4debca0633e697c88269395493cebf2e10db21ca2dbde37c1356452"
+
 [[package]]
 name = "voyageurs"
 version = "0.1.0"
 dependencies = [
  "bitfield-struct 0.12.1",
  "mammoth",
+ "pci",
+ "volatile",
+ "yellowstone-yunq",
 ]
 
 [[package]]
diff --git a/rust/lib/mammoth/src/mem.rs b/rust/lib/mammoth/src/mem.rs
index 7b81806..6a67976 100644
--- a/rust/lib/mammoth/src/mem.rs
+++ b/rust/lib/mammoth/src/mem.rs
@@ -78,6 +78,10 @@ impl MemoryRegion {
         })
     }
 
+    pub fn vaddr(&self) -> usize {
+        self.virt_addr as usize
+    }
+
     pub fn slice<T>(&self) -> &[T] {
         unsafe {
             slice::from_raw_parts(
@@ -246,11 +250,11 @@ pub fn map_cap_and_leak(mem_cap: Capability) -> u64 {
     vaddr
 }
 
-pub fn map_direct_physical_and_leak(paddr: u64, size: u64) -> u64 {
-    let mem_cap = syscall::memory_object_direct_physical(paddr, size).unwrap();
+pub fn map_direct_physical_and_leak<T>(paddr: usize, size: usize) -> *mut T {
+    let mem_cap = syscall::memory_object_direct_physical(paddr as u64, size as u64).unwrap();
     let vaddr = syscall::address_space_map(&mem_cap).unwrap();
     mem_cap.release();
-    vaddr
+    vaddr as *mut T
 }
 
 pub fn map_physical_and_leak(size: u64) -> (u64, u64) {
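`map_direct_physical_and_leak` now returns a typed pointer instead of a bare `u64`, so callers pick the element type at the call site (the AHCI controller change below adapts to exactly this). A minimal sketch of the new calling convention, assuming the `mammoth::mem` API shown in the hunk above; `HbaRegisters` is a made-up type for illustration only:

```rust
use mammoth::mem;

/// Hypothetical register block, for illustration only.
#[repr(C)]
struct HbaRegisters {
    capability: u32,
    global_host_control: u32,
}

fn map_hba(paddr: usize) -> *mut HbaRegisters {
    // The element type is inferred from the annotation; previously every
    // caller received a u64 and had to cast it by hand.
    let regs: *mut HbaRegisters = mem::map_direct_physical_and_leak(paddr, 0x1100);
    regs
}
```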
diff --git a/rust/lib/mammoth/src/physical_box.rs b/rust/lib/mammoth/src/physical_box.rs
index 692eed1..963b6ec 100644
--- a/rust/lib/mammoth/src/physical_box.rs
+++ b/rust/lib/mammoth/src/physical_box.rs
@@ -4,17 +4,34 @@ use core::{
     ptr::NonNull,
 };
 
-use alloc::{slice, vec::Vec};
+use alloc::{boxed::Box, slice, vec::Vec};
 
 use crate::mem::MemoryRegion;
 
 pub struct PhysicalBox<T: ?Sized> {
     data: NonNull<T>,
+    #[allow(dead_code)]
     region: MemoryRegion,
     physical_address: usize,
     _marker: PhantomData<T>,
 }
 
+impl<T> PhysicalBox<T> {
+    pub fn new(data: T) -> Self {
+        let (memory_region, paddr) =
+            MemoryRegion::contiguous_physical(size_of::<T>() as u64).expect("Failed to allocate");
+        // UNWRAP: We know this isn't null.
+        let ptr = NonNull::new(memory_region.mut_ptr_at_offset(0)).unwrap();
+        unsafe { ptr.write(data) };
+        Self {
+            data: ptr,
+            region: memory_region,
+            physical_address: paddr as usize,
+            _marker: PhantomData,
+        }
+    }
+}
+
 impl<T: ?Sized> PhysicalBox<T> {
     pub fn physical_address(&self) -> usize {
         self.physical_address
@@ -50,7 +67,7 @@ impl<T> PhysicalBox<[T]> {
     {
         let layout = core::alloc::Layout::array::<T>(len).expect("Layout overflow");
 
-        // TODO: Implement a function like alloc that takes a layout.        let (memory_region, paddr) =
+        // TODO: Implement a function like alloc that takes a layout.
         let (memory_region, paddr) =
             MemoryRegion::contiguous_physical(layout.size() as u64).expect("Failed to allocate");
 
@@ -122,6 +139,13 @@ where
     }
 }
 
+/// SAFETY: We are the only owner of this pointer.
+unsafe impl<T: ?Sized> Send for PhysicalBox<T> where Box<T>: Send {}
+
+/// SAFETY: You must have a mutable reference to this
+/// type to modify the data at the pointer.
+unsafe impl<T: ?Sized> Sync for PhysicalBox<T> where Box<T>: Sync {}
+
 impl<T: ?Sized> Drop for PhysicalBox<T> {
     fn drop(&mut self) {
         // SAFETY:
diff --git a/rust/lib/pci/src/device.rs b/rust/lib/pci/src/device.rs
index ab1b25d..948cd04 100644
--- a/rust/lib/pci/src/device.rs
+++ b/rust/lib/pci/src/device.rs
@@ -72,15 +72,15 @@ impl PciDevice {
             control.capable_address_64(),
             "We don't handle the non-64bit case for MSI yet."
         );
-        assert!(
-            control.multi_message_capable() == 0,
-            "We don't yet handle multi-message capable devices."
-        );
+
+        if control.multi_message_capable() != 0 {
+            mammoth::debug!("WARN: We don't yet handle multi-message capable devices.");
+        }
 
         // FIXME: These probably need to be volatile writes.
         let header: &mut PciDeviceHeader = self.memory_region.as_mut();
         header.command = header.command.with_interrupt_disable(true);
-        msi_cap.msi_control = control.with_msi_enable(true);
+        msi_cap.msi_control = control.with_msi_enable(true).with_multi_message_enable(0);
 
         // For setting addr and data field, see intel ref
         // Vol 3. Section 11.11
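`PhysicalBox::new` gives single values the same contiguous-physical allocation the existing slice constructor uses, which is what the XHCI code below relies on for device and input contexts. A short sketch of the intended pattern, assuming the `physical_box` API shown above; `CommandHeader` is a made-up descriptor type for illustration:

```rust
use mammoth::physical_box::PhysicalBox;

/// Hypothetical DMA descriptor, for illustration only.
#[repr(C)]
#[derive(Default)]
struct CommandHeader {
    flags: u16,
    prdt_length: u16,
    command_table_base: u64,
}

fn allocate_descriptor() -> PhysicalBox<CommandHeader> {
    // The value is backed by a physically contiguous region, so its physical
    // address can be handed straight to the device.
    let header = PhysicalBox::new(CommandHeader::default());
    let _paddr = header.physical_address() as u64;
    header
}
```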
diff --git a/rust/sys/denali/src/ahci/controller.rs b/rust/sys/denali/src/ahci/controller.rs
index 2a402f0..08bd082 100644
--- a/rust/sys/denali/src/ahci/controller.rs
+++ b/rust/sys/denali/src/ahci/controller.rs
@@ -1,3 +1,5 @@
+use core::ffi::c_void;
+
 use alloc::sync::Arc;
 use mammoth::{
     cap::Capability,
@@ -27,7 +29,8 @@ impl AhciController {
         let pci_device = PciDevice::from_cap(pci_memory).unwrap();
 
         let hba_vaddr =
-            mem::map_direct_physical_and_leak(pci_device.header().bars[5] as u64, 0x1100);
+            mem::map_direct_physical_and_leak(pci_device.header().bars[5] as usize, 0x1100)
+                as *mut c_void as u64;
         let hba = unsafe { (hba_vaddr as *mut AhciHba).as_mut().unwrap() };
         let mut controller = Self {
             pci_device: Mutex::new(pci_device),
diff --git a/rust/sys/voyageurs/Cargo.toml b/rust/sys/voyageurs/Cargo.toml
index 084d083..bd65fff 100644
--- a/rust/sys/voyageurs/Cargo.toml
+++ b/rust/sys/voyageurs/Cargo.toml
@@ -6,3 +6,10 @@ edition = "2024"
 [dependencies]
 bitfield-struct = "0.12"
 mammoth = { path = "../../lib/mammoth/" }
+pci = { path = "../../lib/pci" }
+volatile = "0.6.1"
+yellowstone-yunq = { version = "0.1.0", path = "../../lib/yellowstone" }
+
+[features]
+default = ["debug"]
+debug = []
diff --git a/rust/sys/voyageurs/src/main.rs b/rust/sys/voyageurs/src/main.rs
index e9e5dd6..92a3ee1 100644
--- a/rust/sys/voyageurs/src/main.rs
+++ b/rust/sys/voyageurs/src/main.rs
@@ -5,12 +5,47 @@ extern crate alloc;
 
 mod xhci;
 
-use mammoth::{debug, define_entry, zion::z_err_t};
+use alloc::sync::Arc;
+use mammoth::{
+    cap::Capability,
+    debug, define_entry,
+    sync::Mutex,
+    task::{Executor, Task},
+    zion::z_err_t,
+};
+use pci::PciDevice;
+use xhci::driver::XHCIDriver;
 
 define_entry!();
 
 #[unsafe(no_mangle)]
 extern "C" fn main() -> z_err_t {
-    debug!("In Voyageurs");
+    #[cfg(feature = "debug")]
+    debug!("Voyageurs Starting.");
+
+    let yellowstone = yellowstone_yunq::from_init_endpoint();
+
+    let xhci_info = yellowstone
+        .get_xhci_info()
+        .expect("Failed to get XHCI info from yellowstone.");
+
+    let pci_device = PciDevice::from_cap(Capability::take(xhci_info.xhci_region)).unwrap();
+
+    let xhci_driver = Arc::new(XHCIDriver::from_pci_device(pci_device));
+
+    let executor = Arc::new(Mutex::new(Executor::new()));
+
+    let driver_clone = xhci_driver.clone();
+    let spawner = executor.clone().lock().new_spawner();
+    let interrupt_thread = mammoth::thread::spawn(move || driver_clone.interrupt_loop(spawner));
+
+    executor
+        .clone()
+        .lock()
+        .spawn(Task::new(async move { xhci_driver.startup().await }));
+
+    executor.clone().lock().run();
+    interrupt_thread.join().unwrap();
+
     0
 }
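Both `main.rs` and the driver below guard every log line with `#[cfg(feature = "debug")]`. One way to keep that behaviour without repeating the attribute is a small wrapper macro; this is only an illustrative sketch, not something the patch adds:

```rust
/// Hypothetical helper: expands to nothing unless the crate's "debug"
/// feature is enabled at the expansion site.
macro_rules! debug_log {
    ($($arg:tt)*) => {{
        #[cfg(feature = "debug")]
        mammoth::debug!($($arg)*);
    }};
}
```

Call sites would then read `debug_log!("Voyageurs Starting.");`.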
diff --git a/rust/sys/voyageurs/src/xhci/device_context_base_array.rs b/rust/sys/voyageurs/src/xhci/device_context_base_array.rs
new file mode 100644
index 0000000..6c32179
--- /dev/null
+++ b/rust/sys/voyageurs/src/xhci/device_context_base_array.rs
@@ -0,0 +1,104 @@
+use alloc::boxed::Box;
+use mammoth::physical_box::PhysicalBox;
+
+use crate::xhci::{
+    data_structures::{
+        DeviceContext, EndpointContextFields, EndpointState, EndpointType, InputContext,
+        TRDequeuePointer, TrbTransferEvent,
+    },
+    registers::DoorbellPointer,
+    trb_ring::TrbRing,
+};
+
+struct DeviceContextBaseArray(PhysicalBox<[u64]>);
+
+impl DeviceContextBaseArray {
+    pub fn new(max_slots: u8) -> Self {
+        Self(PhysicalBox::default_with_count(0, max_slots as usize + 1))
+    }
+}
+
+pub struct DeviceSlot {
+    device_context: PhysicalBox<DeviceContext>,
+    endpoint_0_transfer_ring: TrbRing<TrbTransferEvent>,
+}
+
+impl DeviceSlot {
+    fn new() -> Self {
+        Self {
+            device_context: PhysicalBox::new(DeviceContext::default()),
+            endpoint_0_transfer_ring: TrbRing::new(),
+        }
+    }
+}
+
+pub struct DeviceSlotManager {
+    device_context_base_array: DeviceContextBaseArray,
+    slots: Box<[Option<DeviceSlot>]>,
+    doorbells: Box<[DoorbellPointer]>,
+}
+
+impl DeviceSlotManager {
+    pub fn new(max_slots: u8, doorbells: Box<[DoorbellPointer]>) -> Self {
+        assert!(
+            doorbells.len() == max_slots as usize,
+            "Got an incorrect doorbell slice size."
+        );
+        Self {
+            device_context_base_array: DeviceContextBaseArray::new(max_slots),
+            slots: core::iter::repeat_with(|| None)
+                .take(max_slots as usize)
+                .collect(),
+            doorbells,
+        }
+    }
+
+    pub fn device_context_base_array_physical_address(&self) -> usize {
+        self.device_context_base_array.0.physical_address()
+    }
+
+    /// Prepares a slot and an input context for an address device command.
+    ///
+    /// Follows section 4.6.5 of the XHCI spec.
+    pub fn prep_slot_for_address_device(
+        &mut self,
+        slot_id: u8,
+        port_number: u8,
+    ) -> PhysicalBox<InputContext> {
+        // TODO: Ensure alignment
+        let device_slot = DeviceSlot::new();
+        let mut input_context = PhysicalBox::new(InputContext::default());
+
+        // The Add Context flags for the Slot Context and the Endpoint 0 Context shall be set to ‘1’.
+        input_context.input_control_context.add_context_flags = 0x3;
+
+        // See XHCI 4.5.2 for information
+        input_context.slot_context.fields = input_context
+            .slot_context
+            .fields
+            .with_root_hub_port_number(port_number)
+            .with_route_string(0)
+            .with_context_entries(1)
+            .with_interrupter_target(0);
+
+        // The Endpoint 0 Context data structure in the
+        // Input Context shall define valid values for the TR Dequeue Pointer, EP Type, Error
+        // Count (CErr), and Max Packet Size fields. The MaxPStreams, Max Burst Size, and
+        // EP State values shall be cleared to '0'
+        input_context.endpoint_context_0.tr_deque_pointer = TRDequeuePointer::new()
+            .with_pointer(device_slot.endpoint_0_transfer_ring.physical_base_address() as u64)
+            .with_dequeue_cycle_state(true);
+
+        input_context.endpoint_context_0.fields = EndpointContextFields::new()
+            .with_endpoint_type(EndpointType::Control)
+            .with_max_primary_streams(0)
+            .with_max_burst_size(0)
+            .with_endpoint_state(EndpointState::Disabled);
+
+        self.device_context_base_array.0[slot_id as usize] =
+            device_slot.device_context.physical_address() as u64;
+        self.slots[slot_id as usize - 1] = Some(device_slot);
+
+        input_context
+    }
+}
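`DeviceSlotManager` writes the DCBAA entry at index `slot_id` but stores the `DeviceSlot` at `slot_id - 1`: slot IDs are 1-based, and DCBAA entry 0 is reserved (it only holds the scratchpad buffer array pointer, which this driver asserts it does not need). A standalone sketch of that indexing convention:

```rust
/// DCBAA index for a device slot: slot IDs start at 1; entry 0 is reserved
/// for the scratchpad buffer array pointer.
fn dcbaa_index(slot_id: u8) -> usize {
    slot_id as usize
}

/// Index into the driver's own slot bookkeeping, which has no entry 0.
fn slots_index(slot_id: u8) -> usize {
    slot_id as usize - 1
}

fn main() {
    // With max_slots = 8 the DCBAA holds 9 entries (0..=8), the slot table 8.
    assert_eq!(dcbaa_index(1), 1);
    assert_eq!(slots_index(1), 0);
}
```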
diff --git a/rust/sys/voyageurs/src/xhci/driver.rs b/rust/sys/voyageurs/src/xhci/driver.rs
new file mode 100644
index 0000000..746ee16
--- /dev/null
+++ b/rust/sys/voyageurs/src/xhci/driver.rs
@@ -0,0 +1,327 @@
+use core::slice;
+
+use alloc::sync::Arc;
+use mammoth::cap::Capability;
+use mammoth::mem::MemoryRegion;
+use mammoth::sync::Mutex;
+use mammoth::task::Spawner;
+use mammoth::task::Task;
+use mammoth::write_unaligned_volatile;
+
+use super::registers::{self};
+use crate::xhci::data_structures::CommandCompletionCode;
+use crate::xhci::data_structures::TrbAddressDeviceCommand;
+use crate::xhci::data_structures::TrbCommandCompletion;
+use crate::xhci::data_structures::TrbEnableSlotCommand;
+use crate::xhci::data_structures::TrbNoOp;
+use crate::xhci::data_structures::TrbPortStatusChangeEvent;
+use crate::xhci::data_structures::TrbType;
+use crate::xhci::data_structures::TypedTrb;
+use crate::xhci::device_context_base_array::DeviceSlotManager;
+use crate::xhci::event_ring::EventRing;
+use crate::xhci::registers::DoorbellPointer;
+use crate::xhci::registers::HostControllerOperationalWrapper;
+use crate::xhci::registers::PortStatusAndControl;
+use crate::xhci::trb_ring::CommandRing;
+
+pub struct XHCIDriver {
+    #[allow(dead_code)]
+    pci_device: pci::PciDevice,
+    capabilities: registers::HostControllerCapabilities,
+    operational: HostControllerOperationalWrapper,
+    registers_region: MemoryRegion,
+    command_ring: Mutex<CommandRing>,
+    event_ring: Mutex<EventRing>,
+    device_slot_manager: Mutex<DeviceSlotManager>,
+    irq_port_cap: Capability,
+}
+
+impl XHCIDriver {
+    pub fn from_pci_device(mut pci_device: pci::PciDevice) -> Self {
+        // however the RTSOFF and DBOFF Registers shall position the Runtime and
+        // Doorbell Registers to reside on their own respective virtual memory pages. The
+        // BAR0 size shall provide space that is sufficient to cover the offset between the
+        // respective register spaces (Capability, Operational, Runtime, etc.) and the
+        // register spaces themselves (e.g. a minimum of 3 virtual memory pages).
+        // If virtualization is not supported, all xHCI register spaces may reside on a single
+        // page pointed to by the BAR0.
+        let three_pages = 0x3000;
+        let address =
+            ((pci_device.header().bars[1] as usize) << 32) | (pci_device.header().bars[0] as usize);
+        let registers_region = MemoryRegion::direct_physical(address as u64, three_pages).unwrap();
+        let irq_port_cap = pci_device.register_msi().unwrap();
+
+        let (operational, capabilities) = HostControllerOperationalWrapper::new(address as usize);
+
+        let max_slots = capabilities.params_1.max_device_slots();
+        let doorbell_physical = address as usize + capabilities.doorbell_offset as usize;
+        let (command_doorbell, slot_doorbells) =
+            DoorbellPointer::create_command_and_slots(doorbell_physical, max_slots);
+
+        let mut driver = Self {
+            pci_device,
+            capabilities,
+            operational,
+            registers_region,
+            command_ring: Mutex::new(CommandRing::new(command_doorbell)),
+            event_ring: Mutex::new(EventRing::new()),
+            device_slot_manager: Mutex::new(DeviceSlotManager::new(max_slots, slot_doorbells)),
+            irq_port_cap,
+        };
+        driver.initialize();
+        driver
+    }
+
+    fn interrupters(&self) -> &mut [registers::InterrupterRegisterSet] {
+        // See Table 5-35: Host Controller Runtime Registers
+        const INTERRUPTER_OFFSET_FROM_RUNTIME: u32 = 0x20;
+
+        let interrupter_offset = (self.capabilities.runtime_register_space_offset
+            + INTERRUPTER_OFFSET_FROM_RUNTIME) as usize;
+
+        // SAFETY: The XHCI spec says so?
+        unsafe {
+            slice::from_raw_parts_mut(
+                self.registers_region.mut_ptr_at_offset(interrupter_offset),
+                self.capabilities.params_1.max_interrupters() as usize,
+            )
+        }
+    }
+
+    fn initialize(&mut self) {
+        #[cfg(feature = "debug")]
+        mammoth::debug!("Stopping XHCI Controller.");
+
+        // Stop the host controller.
+        self.operational
+            .update_command(|cmd| cmd.with_run_stop(false));
+
+        #[cfg(feature = "debug")]
+        mammoth::debug!("Waiting for controller to halt.");
+
+        // Sleep until the controller is halted.
+        let mut status = self.operational.read_status();
+        while !status.host_controller_halted() {
+            // TODO: Sleep for how long?
+            mammoth::syscall::thread_sleep(50).unwrap();
+            status = self.operational.read_status();
+        }
+
+        #[cfg(feature = "debug")]
+        mammoth::debug!("Resetting Controller.");
+
+        self.operational
+            .update_command(|cmd| cmd.with_host_controller_reset(true));
+
+        let mut command: registers::UsbCommand = self.operational.read_command();
+        while command.host_controller_reset() {
+            // TODO: Sleep for how long?
+            mammoth::syscall::thread_sleep(50).unwrap();
+            command = self.operational.read_command();
+        }
+
+        #[cfg(feature = "debug")]
+        mammoth::debug!("XHCI Controller Reset, waiting ready.");
+
+        let mut status = self.operational.read_status();
+        while status.controller_not_ready() {
+            // TODO: Sleep for how long?
+            mammoth::syscall::thread_sleep(50).unwrap();
+            status = self.operational.read_status();
+        }
+
+        #[cfg(feature = "debug")]
+        mammoth::debug!("XHCI Controller Ready.");
+
+        #[cfg(feature = "debug")]
+        mammoth::debug!("Setting Command Ring");
+
+        self.operational.set_command_ring_dequeue_pointer(
+            self.command_ring.lock().trb_ring.physical_base_address(),
+            true,
+        );
+
+        #[cfg(feature = "debug")]
+        mammoth::debug!("Setting DCBA.");
+
+        self.operational
+            .set_device_context_base_address_array_pointer(
+                self.device_slot_manager
+                    .lock()
+                    .device_context_base_array_physical_address(),
+            );
+        // We tell the controller that we can support as many slots as it does because
+        // we allocate a full 4K page to the DCBA, which is 256 entries and the max
+        // slots are 255.
+        self.operational.update_configure(|cfg| {
+            cfg.with_max_device_slots_enabled(self.capabilities.params_1.max_device_slots())
+        });
+
+        assert!(
+            self.capabilities.params_2.max_scratchpad_buffers() == 0,
+            "Unsupported scratchpad buffers."
+        );
+
+        #[cfg(feature = "debug")]
+        mammoth::debug!("Setting up initial event ring.");
+
+        let interrupter0 = &mut self.interrupters()[0];
+        // SAFETY:
+        // - The HC was halted above.
+        // - The segment table is size 1.
+        unsafe {
+            let event_ring = self.event_ring.lock();
+            interrupter0.set_event_ring(
+                event_ring.segment_table(),
+                event_ring.erdp_physical_address(),
+            );
+        }
+        write_unaligned_volatile!(
+            interrupter0,
+            interrupter_moderation,
+            registers::InterrupterModeration::new()
+                .with_interrupt_moderation_interval(4000)
+                .with_interrupt_moderation_counter(0)
+        );
+        write_unaligned_volatile!(
+            interrupter0,
+            interrupter_management,
+            registers::InterrupterManagement::new().with_interrupt_enabled(true)
+        );
+
+        self.operational
+            .update_command(|cmd| cmd.with_run_stop(true).with_interrupter_enable(true));
+
+        #[cfg(feature = "debug")]
+        mammoth::debug!("Enabled interrupts and controller.");
+    }
+
+    pub fn interrupt_loop(self: Arc<Self>, spawner: Spawner) {
+        loop {
+            let _ = mammoth::syscall::port_recv(&self.irq_port_cap, &mut [], &mut []).unwrap();
+            #[cfg(feature = "debug")]
+            mammoth::debug!("Received Interrupt.");
+            self.interrupters()[0].interrupter_management = self.interrupters()[0]
+                .interrupter_management
+                .with_interrupt_pending(true);
+
+            // TODO: Make event ring own its interrupter.
+            let mut event_ring = self.event_ring.lock();
+
+            while let Some(trb) = event_ring.get_next() {
+                match trb.trb_type() {
+                    TrbType::TransferEvent => {
+                        todo!("Handle Transfer")
+                    }
+                    TrbType::CommandCompletionEvent => {
+                        self.command_ring
+                            .lock()
+                            .trb_ring
+                            .handle_completion(TrbCommandCompletion::from_trb(trb));
+                    }
+                    TrbType::PortStatusChangeEvent => {
+                        let trb = TrbPortStatusChangeEvent::from_trb(trb);
+                        let self_clone = self.clone();
+                        spawner.spawn(Task::new(async move {
+                            self_clone.port_status_change(trb).await
+                        }));
+                    }
+                    _ => {
+                        panic!("Unhandled event type: {:?}", trb.trb_type());
+                    }
+                }
+            }
+            event_ring.update_dequeue_pointer(&mut self.interrupters()[0]);
+        }
+    }
+
+    async fn send_command(&self, trb: impl TypedTrb) -> TrbCommandCompletion {
+        // Split the future and the await so the lock is dropped before we await.
+        let future = { self.command_ring.lock().enqueue_command(trb) };
+        future.await
+    }
+
+    pub async fn startup(&self) {
+        #[cfg(feature = "debug")]
+        mammoth::debug!("Sending no op command.");
+
+        let result = self.send_command(TrbNoOp::new()).await;
+
+        assert!(result.completion_code() == CommandCompletionCode::Success.into_bits());
+
+        #[cfg(feature = "debug")]
+        mammoth::debug!("Successfully tested no op command.");
+
+        #[cfg(feature = "debug")]
+        mammoth::debug!("Resetting all connected ports.");
+        for port_index in 0..self.operational.num_ports() {
+            self.operational
+                .update_port_status(port_index, |p| p.clear_change_bits());
+        }
+
+        for port_index in 0..self.operational.num_ports() {
+            let status = self.operational.get_port(port_index).status_and_control;
+            if status.port_power() && status.current_connect_status() {
+                mammoth::debug!("Resetting port {}", port_index);
+                self.operational.update_port_status(port_index, |_| {
+                    PortStatusAndControl::new()
+                        .with_port_reset(true)
+                        .with_port_power(true)
+                });
+            }
+        }
+    }
+
+    async fn port_status_change(self: Arc<Self>, status_change: TrbPortStatusChangeEvent) {
+        // Ports are indexed from 1.
+        let port_id = status_change.port_id();
+        let port_index = (port_id - 1) as usize;
+
+        let port_status = self
+            .operational
+            .get_port(port_index as usize)
+            .status_and_control;
+
+        #[cfg(feature = "debug")]
+        mammoth::debug!("Port status change for port {}", port_id);
+
+        if !port_status.port_reset_change() {
+            mammoth::debug!(
+                "Unknown port status event, not handling. status= {:?}",
+                port_status
+            );
+            return;
+        }
+
+        self.operational
+            .update_port_status(port_index, |s| s.clear_change_bits());
+
+        #[cfg(feature = "debug")]
+        mammoth::debug!("Enabling slot.");
+
+        let resp = self.send_command(TrbEnableSlotCommand::new()).await;
+        assert!(resp.completion_code() == CommandCompletionCode::Success.into_bits());
+
+        let slot = resp.slot_id();
+
+        #[cfg(feature = "debug")]
+        mammoth::debug!("Creating slot data structures in slot {}.", slot);
+
+        let input_context = self
+            .device_slot_manager
+            .lock()
+            .prep_slot_for_address_device(slot, port_id);
+
+        #[cfg(feature = "debug")]
+        mammoth::debug!("Sending address device.");
+
+        let resp = self
+            .send_command(
+                TrbAddressDeviceCommand::new()
+                    .with_slot_id(slot)
+                    .with_input_context_pointer(input_context.physical_address() as u64),
+            )
+            .await;
+        assert!(resp.completion_code() == CommandCompletionCode::Success.into_bits());
+    }
+}
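`send_command` builds the future inside a block so the command-ring `Mutex` guard is released before the `.await`; otherwise the interrupt thread could never take the same lock to deliver the completion. A self-contained sketch of that scoping idiom with toy types (not the driver's own):

```rust
use std::future::{ready, Ready};
use std::sync::Mutex;

struct Queue(Vec<u32>);

impl Queue {
    /// Enqueuing needs the lock; the returned future does not.
    fn enqueue(&mut self, value: u32) -> Ready<u32> {
        self.0.push(value);
        ready(value)
    }
}

async fn send(queue: &Mutex<Queue>) -> u32 {
    // The guard only lives for this block, so it is dropped before the await
    // and the completing side can lock the queue in the meantime.
    let future = { queue.lock().unwrap().enqueue(7) };
    future.await
}
```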
diff --git a/rust/sys/voyageurs/src/xhci/event_ring.rs b/rust/sys/voyageurs/src/xhci/event_ring.rs
new file mode 100644
index 0000000..4ab72e8
--- /dev/null
+++ b/rust/sys/voyageurs/src/xhci/event_ring.rs
@@ -0,0 +1,75 @@
+use alloc::vec::Vec;
+
+use crate::xhci::{
+    data_structures::{EventRingSegmentTable, TransferRequestBlock, TrbRingSegment},
+    registers::InterrupterRegisterSet,
+    trb_ring::TrbPointer,
+};
+
+pub struct EventRing {
+    segment_table: EventRingSegmentTable,
+    segments: Vec<TrbRingSegment>,
+    cycle_bit: bool,
+    trb_pointer: TrbPointer,
+}
+
+impl EventRing {
+    pub fn new() -> Self {
+        // Software maintains an Event Ring Consumer Cycle State (CCS) bit, initializing it
+        // to ‘1’...
+        let cycle_bit = true;
+        let mut event_ring = Self {
+            segment_table: EventRingSegmentTable::new(1),
+            segments: [TrbRingSegment::new(100)].into(),
+            cycle_bit,
+            trb_pointer: TrbPointer::default(),
+        };
+
+        event_ring.segment_table[0].from_trb_ring(&event_ring.segments[0]);
+
+        event_ring
+    }
+
+    pub fn segment_table(&self) -> &EventRingSegmentTable {
+        &self.segment_table
+    }
+
+    pub fn erdp_physical_address(&self) -> usize {
+        self.segments[self.trb_pointer.segment_index].physical_address()
+            + self.trb_pointer.segment_physical_offset()
+    }
+
+    fn current_trb(&self) -> TransferRequestBlock {
+        // TODO: These should be volatile reads.
+        self.segments[self.trb_pointer.segment_index][self.trb_pointer.segment_offset]
+    }
+
+    fn increment_pointer(&mut self) {
+        self.trb_pointer.segment_offset += 1;
+
+        if self.trb_pointer.segment_offset == self.segments[self.trb_pointer.segment_index].len() {
+            self.trb_pointer.segment_index += 1;
+            self.trb_pointer.segment_offset = 0;
+
+            if self.trb_pointer.segment_index == self.segments.len() {
+                // Wrap around to front.
+                self.trb_pointer.segment_index = 0;
+                self.cycle_bit = !self.cycle_bit;
+            }
+        }
+    }
+
+    pub fn get_next(&mut self) -> Option<TransferRequestBlock> {
+        let curr = self.current_trb();
+        if curr.cycle() != self.cycle_bit {
+            None
+        } else {
+            self.increment_pointer();
+            Some(curr)
+        }
+    }
+
+    pub fn update_dequeue_pointer(&self, interrupter: &mut InterrupterRegisterSet) {
+        interrupter.update_dequeue_pointer(self.erdp_physical_address());
+    }
+}
diff --git a/rust/sys/voyageurs/src/xhci/mod.rs b/rust/sys/voyageurs/src/xhci/mod.rs
index 008e36e..f42fa09 100644
--- a/rust/sys/voyageurs/src/xhci/mod.rs
+++ b/rust/sys/voyageurs/src/xhci/mod.rs
@@ -1,2 +1,7 @@
-pub mod data_structures;
-pub mod registers;
+mod data_structures;
+mod device_context_base_array;
+pub mod driver;
+mod event_ring;
+mod registers;
+mod trb_ring;
+
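The event ring treats a TRB as valid only while its cycle bit matches the software-held Consumer Cycle State, and flips CCS each time the dequeue pointer wraps. A standalone model of that handshake with toy types:

```rust
#[derive(Clone, Copy)]
struct Trb {
    cycle: bool,
}

struct Consumer {
    ring: Vec<Trb>,
    index: usize,
    ccs: bool,
}

impl Consumer {
    /// A TRB is new only while its cycle bit matches the consumer cycle state.
    fn next(&mut self) -> Option<Trb> {
        let trb = self.ring[self.index];
        if trb.cycle != self.ccs {
            return None;
        }
        self.index += 1;
        if self.index == self.ring.len() {
            // Wrapping toggles CCS, mirroring the producer flipping the bit
            // it writes on its next pass around the ring.
            self.index = 0;
            self.ccs = !self.ccs;
        }
        Some(trb)
    }
}

fn main() {
    let mut consumer = Consumer {
        ring: vec![Trb { cycle: true }, Trb { cycle: false }],
        index: 0,
        ccs: true,
    };
    assert!(consumer.next().is_some()); // written on the current lap
    assert!(consumer.next().is_none()); // stale entry from the previous lap
}
```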
diff --git a/rust/sys/voyageurs/src/xhci/trb_ring.rs b/rust/sys/voyageurs/src/xhci/trb_ring.rs
new file mode 100644
index 0000000..e5789da
--- /dev/null
+++ b/rust/sys/voyageurs/src/xhci/trb_ring.rs
@@ -0,0 +1,190 @@
+use core::task::{Poll, Waker};
+
+use alloc::{collections::vec_deque::VecDeque, sync::Arc, vec::Vec};
+use mammoth::sync::Mutex;
+
+use crate::xhci::{
+    data_structures::{
+        TransferRequestBlock, TrbCommandCompletion, TrbLink, TrbRingSegment, TypedTrb,
+    },
+    registers::DoorbellPointer,
+};
+
+struct TrbFutureState<T> {
+    /// Physical Address for the enqueued TRB.
+    /// Used for sanity checking.
+    physical_address: usize,
+
+    waker: Option<Waker>,
+    response: Option<T>,
+}
+
+#[derive(Clone)]
+pub struct TrbFuture<T> {
+    state: Arc<Mutex<TrbFutureState<T>>>,
+}
+
+impl<T> TrbFuture<T> {
+    fn new(paddr: usize) -> Self {
+        Self {
+            state: Arc::new(Mutex::new(TrbFutureState {
+                physical_address: paddr,
+                waker: None,
+                response: None,
+            })),
+        }
+    }
+}
+
+impl<T: Copy> Future for TrbFuture<T> {
+    type Output = T;
+
+    fn poll(
+        self: core::pin::Pin<&mut Self>,
+        cx: &mut core::task::Context<'_>,
+    ) -> core::task::Poll<Self::Output> {
+        let mut state = self.state.lock();
+        match state.response {
+            Some(trb) => Poll::Ready(trb),
+            None => {
+                state.waker = Some(cx.waker().clone());
+                Poll::Pending
+            }
+        }
+    }
+}
+
+#[derive(Default, Copy, Clone, Debug)]
+pub struct TrbPointer {
+    /// Index into the vector of trb segments.
+    pub segment_index: usize,
+    /// Index into the specific segment.
+    /// This is a TransferRequestBlock index;
+    /// to get the physical offset use segment_physical_offset().
+    pub segment_offset: usize,
+}
+
+impl TrbPointer {
+    pub fn segment_physical_offset(&self) -> usize {
+        self.segment_offset * size_of::<TransferRequestBlock>()
+    }
+}
+
+pub struct TrbRing<T> {
+    segments: Vec<TrbRingSegment>,
+    enqueue_pointer: TrbPointer,
+    cycle_bit: bool,
+    pending_futures: VecDeque<TrbFuture<T>>,
+}
+
+impl<T: TypedTrb> TrbRing<T> {
+    pub fn new() -> Self {
+        Self {
+            // TODO: What size and count should this be?
+            segments: alloc::vec![TrbRingSegment::new(100)],
+            enqueue_pointer: TrbPointer::default(),
+            // Start with this as true so we are flipping bits from 0 (default) to 1
+            // to mark the enqueue pointer.
+            cycle_bit: true,
+            pending_futures: VecDeque::new(),
+        }
+    }
+
+    pub fn physical_base_address(&self) -> usize {
+        self.segments[0].physical_address()
+    }
+
+    fn physical_address_of_enqueue_pointer(&self) -> usize {
+        self.segments[self.enqueue_pointer.segment_index].physical_address()
+            + self.enqueue_pointer.segment_physical_offset()
+    }
+
+    pub fn enqueue_trb(&mut self, trb: TransferRequestBlock) -> TrbFuture<T> {
+        let paddr = self.physical_address_of_enqueue_pointer();
+        *self.next_trb_ref() = trb.with_cycle(self.cycle_bit);
+        self.advance_enqueue_pointer();
+        let future = TrbFuture::new(paddr);
+        self.pending_futures.push_back(future.clone());
+        future
+    }
+
+    fn next_trb_ref(&mut self) -> &mut TransferRequestBlock {
+        &mut self.segments[self.enqueue_pointer.segment_index][self.enqueue_pointer.segment_offset]
+    }
+
+    fn advance_enqueue_pointer(&mut self) {
+        self.enqueue_pointer.segment_offset += 1;
+
+        if self.enqueue_pointer.segment_offset
+            == self.segments[self.enqueue_pointer.segment_index].len() - 1
+        {
+            // We have reached the end of the segment, insert a link trb.
+
+            // Increment the segment index with wrapping.
+            let next_segment_index =
+                if self.enqueue_pointer.segment_index + 1 == self.segments.len() {
+                    0
+                } else {
+                    self.enqueue_pointer.segment_index + 1
+                };
+
+            let next_segment_pointer = self.segments[next_segment_index].physical_address();
+            let toggle_cycle = next_segment_index == 0;
+
+            *self.next_trb_ref() = TrbLink::new()
+                .with_ring_segment_pointer(next_segment_pointer as u64)
+                .with_cycle(self.cycle_bit)
+                .with_toggle_cycle(toggle_cycle)
+                .to_trb();
+
+            // Flip toggle cycle bit if necessary.
+            self.cycle_bit ^= toggle_cycle;
+
+            self.enqueue_pointer = TrbPointer {
+                segment_index: next_segment_index,
+                segment_offset: 0,
+            };
+        }
+    }
+
+    pub fn handle_completion(&mut self, completion_trb: T) {
+        let trb = completion_trb.to_trb();
+        let paddr = trb.parameter() as usize;
+        let completion = self.pending_futures.pop_front().unwrap();
+        let mut completion = completion.state.lock();
+        // TODO: Handle recovery scenarios here.
+        assert!(
+            completion.physical_address == paddr,
+            "Got an unexpected command completion. Expected: {:0x}, Got: {:0x}",
+            completion.physical_address,
+            paddr
+        );
+        completion.response = Some(completion_trb);
+
+        if let Some(waker) = &completion.waker {
+            waker.wake_by_ref();
+        }
+    }
+}
+
+pub struct CommandRing {
+    pub trb_ring: TrbRing<TrbCommandCompletion>,
+    doorbell: DoorbellPointer,
+}
+
+impl CommandRing {
+    pub fn new(doorbell: DoorbellPointer) -> Self {
+        Self {
+            trb_ring: TrbRing::new(),
+            doorbell,
+        }
+    }
+
+    // We have to explicitly return a future here.
+    pub fn enqueue_command(&mut self, command: impl TypedTrb) -> TrbFuture<TrbCommandCompletion> {
+        let fut = self.trb_ring.enqueue_trb(command.to_trb());
+        // Command Doorbell is always 0.
+        self.doorbell.ring(0);
+        fut
+    }
+}
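`handle_completion` pops the oldest pending future and checks the completion TRB's parameter (the physical address of the command it finishes) against the address recorded at enqueue time. A standalone model of that in-order matching, with the TRB contents reduced to just the address:

```rust
use std::collections::VecDeque;

/// Pending commands, oldest first, keyed by the physical address of the TRB
/// that was handed to the controller.
struct Pending(VecDeque<usize>);

impl Pending {
    fn enqueue(&mut self, trb_paddr: usize) {
        self.0.push_back(trb_paddr);
    }

    /// Completions report the address of the command they finish, so in-order
    /// completion must always match the front of the queue.
    fn complete(&mut self, reported_paddr: usize) {
        let expected = self.0.pop_front().expect("completion with nothing pending");
        assert_eq!(expected, reported_paddr, "out-of-order completion");
    }
}

fn main() {
    let mut pending = Pending(VecDeque::new());
    pending.enqueue(0x1000);
    pending.enqueue(0x1010);
    pending.complete(0x1000);
    pending.complete(0x1010);
}
```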
diff --git a/scripts/qemu.sh b/scripts/qemu.sh
index 22d6f98..a68570d 100755
--- a/scripts/qemu.sh
+++ b/scripts/qemu.sh
@@ -18,7 +18,7 @@ if [[ $1 == "debug" ]]; then
 fi
 
 # Use machine q35 to access PCI devices.
-qemu-system-x86_64 -machine q35 -d guest_errors -m 1G -serial stdio -hda ${BUILD_DIR}/disk.img ${QEMU_ARGS} -device nec-usb-xhci,id=xhci -device usb-kbd,bus=xhci.0
+~/.local/bin/qemu-system-x86_64 -machine q35 -d guest_errors -m 1G -serial stdio -hda ${BUILD_DIR}/disk.img ${QEMU_ARGS} -device nec-usb-xhci,id=xhci -device usb-kbd,bus=xhci.0
 
 popd
 
 # Extra options to add to this script in the future.
diff --git a/zion/interrupt/driver_manager.cpp b/zion/interrupt/driver_manager.cpp
index 3aa2bc5..b991004 100644
--- a/zion/interrupt/driver_manager.cpp
+++ b/zion/interrupt/driver_manager.cpp
@@ -18,6 +18,8 @@ void DriverManager::WriteMessage(uint64_t irq_num, IpcMessage&& message) {
     return;
   }
 
+  dbgln("IRQ offset {x}", offset);
+
   driver_list_[offset]->Send(glcr::Move(message));
 }