Compare commits

4 commits

17 changed files with 520 additions and 248 deletions

@ -1,51 +0,0 @@
#![no_std]
#![no_main]
extern crate alloc;
mod xhci;
use alloc::sync::Arc;
use mammoth::{
cap::Capability,
debug, define_entry,
sync::Mutex,
task::{Executor, Task},
zion::z_err_t,
};
use pci::PciDevice;
use xhci::driver::XHCIDriver;
define_entry!();
#[unsafe(no_mangle)]
extern "C" fn main() -> z_err_t {
#[cfg(feature = "debug")]
debug!("Voyageurs Starting.");
let yellowstone = yellowstone_yunq::from_init_endpoint();
let xhci_info = yellowstone
.get_xhci_info()
.expect("Failed to get XHCI info from yellowstone.");
let pci_device = PciDevice::from_cap(Capability::take(xhci_info.xhci_region)).unwrap();
let xhci_driver = Arc::new(XHCIDriver::from_pci_device(pci_device));
let executor = Arc::new(Mutex::new(Executor::new()));
let driver_clone = xhci_driver.clone();
let interrupt_thread = mammoth::thread::spawn(move || driver_clone.interrupt_loop());
let driver_clone = xhci_driver.clone();
executor
.clone()
.lock()
.spawn(Task::new((|| xhci_driver.clone().startup())()));
executor.clone().lock().run();
interrupt_thread.join().unwrap();
0
}

@ -250,8 +250,8 @@ pub fn map_cap_and_leak(mem_cap: Capability) -> u64 {
vaddr
}
pub fn map_direct_physical_and_leak<T>(paddr: u64, size: u64) -> *mut T {
let mem_cap = syscall::memory_object_direct_physical(paddr, size).unwrap();
pub fn map_direct_physical_and_leak<T>(paddr: usize, size: usize) -> *mut T {
let mem_cap = syscall::memory_object_direct_physical(paddr as u64, size as u64).unwrap();
let vaddr = syscall::address_space_map(&mem_cap).unwrap();
mem_cap.release();
vaddr as *mut T

@ -16,6 +16,22 @@ pub struct PhysicalBox<T: ?Sized> {
_marker: PhantomData<T>,
}
impl<T> PhysicalBox<T> {
pub fn new(data: T) -> Self {
let (memory_region, paddr) =
MemoryRegion::contiguous_physical(size_of::<T>() as u64).expect("Failed to allocate");
// UNWRAP: We know this isn't null.
let ptr = NonNull::new(memory_region.mut_ptr_at_offset(0)).unwrap();
unsafe { ptr.write(data) };
Self {
data: ptr,
region: memory_region,
physical_address: paddr as usize,
_marker: PhantomData,
}
}
}
impl<T: ?Sized> PhysicalBox<T> {
pub fn physical_address(&self) -> usize {
self.physical_address
@ -51,16 +67,10 @@ impl<T> PhysicalBox<[T]> {
{
let layout = core::alloc::Layout::array::<T>(len).expect("Layout overflow");
// TODO: Implement a function like alloc that takes a layout.
let (memory_region, paddr) =
MemoryRegion::contiguous_physical(layout.size() as u64).expect("Failed to allocate");
crate::debug!(
"Physical box allocated: v {:0x} p {:0x}",
memory_region.vaddr(),
paddr
);
let ptr: *mut T = memory_region.mut_ptr_at_offset(0);
for i in 0..len {
unsafe {
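
A rough usage sketch of the new PhysicalBox API (illustrative only, not part of this change; the type and method names are the ones introduced above): allocate a hardware-visible structure in contiguous physical memory, then hand its physical address to the controller.

// Sketch: allocate a DeviceContext the controller can address directly.
let ctx = PhysicalBox::new(DeviceContext::default());
let paddr = ctx.physical_address() as u64;
// paddr is the value that ends up in, for example, a DCBAA entry for a slot.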

@ -29,7 +29,7 @@ impl AhciController {
let pci_device = PciDevice::from_cap(pci_memory).unwrap();
let hba_vaddr =
mem::map_direct_physical_and_leak(pci_device.header().bars[5] as u64, 0x1100)
mem::map_direct_physical_and_leak(pci_device.header().bars[5] as usize, 0x1100)
as *mut c_void as u64;
let hba = unsafe { (hba_vaddr as *mut AhciHba).as_mut().unwrap() };
let mut controller = Self {

@ -0,0 +1,11 @@
use crate::xhci::data_structures::{EndpointContext, SlotContext};
#[repr(C, align(64))]
#[derive(Default)]
pub struct DeviceContext {
slot_context: SlotContext,
endpoint_context_0: EndpointContext,
endpoint_contexts: [EndpointContext; 30],
}
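// Layout check: 32 contexts (1 slot + 31 endpoints) of 0x20 bytes each = 0x400.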
const _: () = assert!(size_of::<DeviceContext>() == 0x400);

@ -1,7 +1,8 @@
use bitfield_struct::bitfield;
use bitfield_struct::{bitenum, bitfield};
#[derive(Debug)]
#[repr(u8)]
#[bitenum]
#[derive(Debug)]
pub enum EndpointState {
/// The endpoint is not operational.
Disabled = 0,
@ -17,29 +18,15 @@ pub enum EndpointState {
/// The endpoint is not running due to a TRB Error. SW may manipulate the Transfer
/// Ring while in this state.
Error = 4,
#[fallback]
Unknown = 5,
}
impl EndpointState {
const fn from_bits(value: u8) -> Self {
match value {
0 => Self::Disabled,
1 => Self::Running,
2 => Self::Halted,
3 => Self::Stopped,
4 => Self::Error,
_ => Self::Unknown,
}
}
const fn into_bits(self) -> u8 {
self as u8
}
}
#[derive(Debug)]
#[repr(u8)]
#[bitenum]
#[derive(Debug)]
pub enum EndpointType {
#[fallback]
NotValid = 0,
IsochOut = 1,
BulkOut = 2,
@ -50,26 +37,6 @@ pub enum EndpointType {
InterruptIn = 7,
}
impl EndpointType {
const fn from_bits(value: u8) -> Self {
match value {
0 => Self::NotValid,
1 => Self::IsochOut,
2 => Self::BulkOut,
3 => Self::InterruptOut,
4 => Self::Control,
5 => Self::IsochIn,
6 => Self::BulkIn,
7 => Self::InterruptIn,
_ => Self::NotValid,
}
}
const fn into_bits(self) -> u8 {
self as u8
}
}
#[bitfield(u64)]
pub struct EndpointContextFields {
/// Endpoint State (EP State). The Endpoint State identifies the current operational state of the
@ -203,26 +170,37 @@ impl TRDequeuePointer {
self.tr_deque_pointer() << 4
}
pub fn set_pointer(self, tr_deque_pointer: u64) -> TRDequeuePointer {
pub fn set_pointer(&mut self, tr_deque_pointer: u64) {
self.set_tr_deque_pointer(tr_deque_pointer >> 4)
}
pub fn with_pointer(self, tr_deque_pointer: u64) -> Self {
self.with_tr_deque_pointer(tr_deque_pointer >> 4)
}
}
#[repr(C, packed)]
pub struct EndpointContext {
pub fields: u64,
pub tr_deque_pointer: TRDequeuePointer,
#[bitfield(u64)]
struct AdditionalFields {
/// Average TRB Length. This field represents the average Length of the TRBs executed by this
/// endpoint. The value of this field shall be greater than 0. Refer to section 4.14.1.1 and the
/// implementation note TRB Lengths and System Bus Bandwidth for more information.
/// The xHC shall use this parameter to calculate system bus bandwidth requirements
pub average_trb_lenght: u16,
pub average_trb_length: u16,
/// Max Endpoint Service Time Interval Payload Low (Max ESIT Payload Lo). This field indicates
/// the low order 16 bits of the Max ESIT Payload. The Max ESIT Payload represents the total
/// number of bytes this endpoint will transfer during an ESIT. This field is only valid for periodic
/// endpoints. Refer to section 6.2.3.8 for more information.
pub max_esit_payload_lo: u16,
__: [u32; 3],
__: u32,
}
#[repr(C)]
#[derive(Default)]
pub struct EndpointContext {
pub fields: EndpointContextFields,
pub tr_deque_pointer: TRDequeuePointer,
additional_fields: AdditionalFields,
__: u64,
}
const _: () = assert!(size_of::<EndpointContext>() == 0x20);

@ -0,0 +1,33 @@
use bitfield_struct::bitfield;
use crate::xhci::data_structures::{EndpointContext, SlotContext};
#[bitfield(u32)]
pub struct InputControlContextSettings {
configuration_value: u8,
interface_number: u8,
alternate_setting: u8,
__: u8,
}
#[repr(C)]
#[derive(Default)]
pub struct InputControlContext {
pub drop_context_flags: u32,
pub add_context_flags: u32,
__: [u32; 5],
settings: InputControlContextSettings,
}
const _: () = assert!(size_of::<InputControlContext>() == 0x20);
#[repr(C)]
#[derive(Default)]
pub struct InputContext {
pub input_control_context: InputControlContext,
pub slot_context: SlotContext,
pub endpoint_context_0: EndpointContext,
pub endpoint_contexts: [EndpointContext; 30],
}
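// Layout check: Input Control Context (0x20) plus a Device Context's worth of contexts (0x400) = 0x420.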
const _: () = assert!(size_of::<InputContext>() == 0x420);

@ -1,10 +1,15 @@
mod device_context;
mod endpoint_context;
mod event_ring_segment_table;
mod input_context;
mod slot_context;
mod trb;
mod trb_ring_segment;
pub use device_context::*;
pub use endpoint_context::*;
pub use event_ring_segment_table::*;
pub use input_context::*;
pub use slot_context::*;
pub use trb::*;
pub use trb_ring_segment::*;

@ -125,6 +125,7 @@ pub struct SlotContextFields {
}
#[repr(C)]
#[derive(Default)]
pub struct SlotContext {
pub fields: SlotContextFields,
__: u128,

@ -142,6 +142,35 @@ pub struct TrbLink {
impl TypedTrb for TrbLink {}
#[bitfield(u128)]
pub struct TrbTransferEvent {
pub transfer_trb_pointer: u64,
#[bits(24)]
pub trb_transfer_lenght: u32,
/// Completion Code. This field encodes the completion status of the command that generated the
/// event. Refer to the respective command definition for a list of the possible Completion Codes
/// associated with the command. Refer to section 6.4.5 for an enumerated list of possible error
/// conditions.
pub completion_code: u8,
#[bits(10)]
__: u16,
/// TRB Type. This field identifies the type of the TRB. Refer to Table 6-91 for the definition of the
/// Command Completion Event TRB type ID
#[bits(6, default=TrbType::TransferEvent)]
pub trb_type: TrbType,
#[bits(5)]
pub endpoint_id: u8,
#[bits(3)]
__: u8,
pub slot_id: u8,
}
impl TypedTrb for TrbTransferEvent {}
#[bitenum]
#[repr(u8)]
pub enum CommandCompletionCode {
@ -232,3 +261,18 @@ pub struct TrbEnableSlotCommand {
}
impl TypedTrb for TrbEnableSlotCommand {}
#[bitfield(u128)]
pub struct TrbAddressDeviceCommand {
pub input_context_pointer: u64,
__: u32,
#[bits(9)]
__: u16,
pub block_set_address_request: bool,
#[bits(6, default=TrbType::AddressDeviceCommand)]
trb_typ: TrbType,
__: u8,
pub slot_id: u8,
}
impl TypedTrb for TrbAddressDeviceCommand {}

@ -1,22 +1,104 @@
use mammoth::mem::MemoryRegion;
use alloc::boxed::Box;
use mammoth::physical_box::PhysicalBox;
pub struct DeviceContextBaseArray {
#[allow(dead_code)]
region: MemoryRegion,
physical_addr: usize,
}
use crate::xhci::{
data_structures::{
DeviceContext, EndpointContextFields, EndpointState, EndpointType, InputContext,
TRDequeuePointer, TrbTransferEvent,
},
registers::DoorbellPointer,
trb_ring::TrbRing,
};
struct DeviceContextBaseArray(PhysicalBox<[u64]>);
impl DeviceContextBaseArray {
pub fn new() -> Self {
let (region, physical_addr) = MemoryRegion::contiguous_physical(0x1000).unwrap();
region.zero_region();
pub fn new(max_slots: u8) -> Self {
Self(PhysicalBox::default_with_count(0, max_slots as usize + 1))
}
}
pub struct DeviceSlot {
device_context: PhysicalBox<DeviceContext>,
endpoint_0_transfer_ring: TrbRing<TrbTransferEvent>,
}
impl DeviceSlot {
fn new() -> Self {
Self {
region,
physical_addr: physical_addr as usize,
device_context: PhysicalBox::new(DeviceContext::default()),
endpoint_0_transfer_ring: TrbRing::new(),
}
}
}
pub struct DeviceSlotManager {
device_context_base_array: DeviceContextBaseArray,
slots: Box<[Option<DeviceSlot>]>,
doorbells: Box<[DoorbellPointer]>,
}
impl DeviceSlotManager {
pub fn new(max_slots: u8, doorbells: Box<[DoorbellPointer]>) -> Self {
assert!(
doorbells.len() == max_slots as usize,
"Got an incorrect doorbell slice size."
);
Self {
device_context_base_array: DeviceContextBaseArray::new(max_slots),
slots: core::iter::repeat_with(|| None)
.take(max_slots as usize)
.collect(),
doorbells,
}
}
pub fn physical_addr(&self) -> usize {
self.physical_addr
pub fn device_context_base_array_physical_address(&self) -> usize {
self.device_context_base_array.0.physical_address()
}
/// Prepares a slot and an input context for an address device command.
///
/// Follows section 4.6.5 of the XHCI spec.
pub fn prep_slot_for_address_device(
&mut self,
slot_id: u8,
port_number: u8,
) -> PhysicalBox<InputContext> {
// TODO: Ensure alignment
let device_slot = DeviceSlot::new();
let mut input_context = PhysicalBox::new(InputContext::default());
// The Add Context flags for the Slot Context and the Endpoint 0 Context shall be set to 1.
input_context.input_control_context.add_context_flags = 0x3;
// See XHCI 4.5.2 for information
input_context.slot_context.fields = input_context
.slot_context
.fields
.with_root_hub_port_number(port_number)
.with_route_string(0)
.with_context_entries(1)
.with_interrupter_target(0);
// The Endpoint 0 Context data structure in the
// Input Context shall define valid values for the TR Dequeue Pointer, EP Type, Error
// Count (CErr), and Max Packet Size fields. The MaxPStreams, Max Burst Size, and
// EP State values shall be cleared to '0'
input_context.endpoint_context_0.tr_deque_pointer = TRDequeuePointer::new()
.with_pointer(device_slot.endpoint_0_transfer_ring.physical_base_address() as u64)
.with_dequeue_cycle_state(true);
input_context.endpoint_context_0.fields = EndpointContextFields::new()
.with_endpoint_type(EndpointType::Control)
.with_max_primary_streams(0)
.with_max_burst_size(0)
.with_endpoint_state(EndpointState::Disabled);
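// Note: DCBAA entries are indexed by slot ID (entry 0 is reserved for the scratchpad
// buffer array pointer per the spec), while the local `slots` vec is zero-based.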
self.device_context_base_array.0[slot_id as usize] =
device_slot.device_context.physical_address() as u64;
self.slots[slot_id as usize - 1] = Some(device_slot);
input_context
}
}

@ -10,17 +10,19 @@ use mammoth::write_unaligned_volatile;
use super::registers::{self};
use crate::xhci::data_structures::CommandCompletionCode;
use crate::xhci::data_structures::TrbAddressDeviceCommand;
use crate::xhci::data_structures::TrbCommandCompletion;
use crate::xhci::data_structures::TrbEnableSlotCommand;
use crate::xhci::data_structures::TrbNoOp;
use crate::xhci::data_structures::TrbPortStatusChangeEvent;
use crate::xhci::data_structures::TrbType;
use crate::xhci::data_structures::TypedTrb;
use crate::xhci::device_context_base_array::DeviceContextBaseArray;
use crate::xhci::device_context_base_array::DeviceSlotManager;
use crate::xhci::event_ring::EventRing;
use crate::xhci::registers::DoorbellPointer;
use crate::xhci::registers::HostControllerOperationalWrapper;
use crate::xhci::registers::PortStatusAndControl;
use crate::xhci::trb_ring::TrbRing;
use crate::xhci::trb_ring::CommandRing;
pub struct XHCIDriver {
#[allow(dead_code)]
@ -28,9 +30,9 @@ pub struct XHCIDriver {
capabilities: registers::HostControllerCapabilities,
operational: HostControllerOperationalWrapper,
registers_region: MemoryRegion,
command_ring: Mutex<TrbRing<TrbCommandCompletion>>,
command_ring: Mutex<CommandRing>,
event_ring: Mutex<EventRing>,
device_context_base_array: DeviceContextBaseArray,
device_slot_manager: Mutex<DeviceSlotManager>,
irq_port_cap: Capability,
}
@ -45,23 +47,25 @@ impl XHCIDriver {
// page pointed to by the BAR0.
let three_pages = 0x3000;
let address =
((pci_device.header().bars[1] as u64) << 32) | (pci_device.header().bars[0] as u64);
let registers_region = MemoryRegion::direct_physical(address, three_pages).unwrap();
((pci_device.header().bars[1] as usize) << 32) | (pci_device.header().bars[0] as usize);
let registers_region = MemoryRegion::direct_physical(address as u64, three_pages).unwrap();
let irq_port_cap = pci_device.register_msi().unwrap();
let (operational, capabilities) = HostControllerOperationalWrapper::new(address as usize);
let p1 = capabilities.params_1;
mammoth::debug!("Num Port: {:?}", p1);
let max_slots = capabilities.params_1.max_device_slots();
let doorbell_physical = address as usize + capabilities.doorbell_offset as usize;
let (command_doorbell, slot_doorbells) =
DoorbellPointer::create_command_and_slots(doorbell_physical, max_slots);
let mut driver = Self {
pci_device,
capabilities,
operational,
registers_region,
command_ring: Mutex::new(TrbRing::new()),
command_ring: Mutex::new(CommandRing::new(command_doorbell)),
event_ring: Mutex::new(EventRing::new()),
device_context_base_array: DeviceContextBaseArray::new(),
device_slot_manager: Mutex::new(DeviceSlotManager::new(max_slots, slot_doorbells)),
irq_port_cap,
};
driver.initialize();
@ -71,32 +75,15 @@ impl XHCIDriver {
fn interrupters(&self) -> &mut [registers::InterrupterRegisterSet] {
// See Table 5-35: Host Controller Runtime Registers
const INTERRUPTER_OFFSET_FROM_RUNTIME: u32 = 0x20;
let runtime = self.capabilities.runtime_register_space_offset;
let interrupter_offset = (runtime + INTERRUPTER_OFFSET_FROM_RUNTIME) as usize;
let params1 = self.capabilities.params_1;
let interrupter_offset = (self.capabilities.runtime_register_space_offset
+ INTERRUPTER_OFFSET_FROM_RUNTIME) as usize;
// SAFETY: The XHCI spec says so?
unsafe {
slice::from_raw_parts_mut(
self.registers_region.mut_ptr_at_offset(interrupter_offset),
params1.max_interrupters() as usize,
)
}
}
fn doorbells(&self) -> &mut [registers::Doorbell] {
let doorbell_offset = self.capabilities.doorbell_offset;
let params1 = self.capabilities.params_1;
// SAFETY: The XHCI spec says so?
unsafe {
slice::from_raw_parts_mut(
self.registers_region
.mut_ptr_at_offset(doorbell_offset as usize),
params1.max_device_slots() as usize,
self.capabilities.params_1.max_interrupters() as usize,
)
}
}
@ -106,45 +93,41 @@ impl XHCIDriver {
mammoth::debug!("Stopping XHCI Controller.");
// Stop the host controller.
self.operational.update(|mut o| {
o.usb_command = o.usb_command.with_run_stop(false);
o
});
self.operational
.update_command(|cmd| cmd.with_run_stop(false));
#[cfg(feature = "debug")]
mammoth::debug!("Waiting for controller to halt.");
// Sleep until the controller is halted.
let mut status = self.operational.status();
let mut status = self.operational.read_status();
while !status.host_controller_halted() {
// TODO: Sleep for how long?
mammoth::syscall::thread_sleep(50).unwrap();
status = self.operational.status();
status = self.operational.read_status();
}
#[cfg(feature = "debug")]
mammoth::debug!("Resetting Controller.");
self.operational.update(|mut o| {
o.usb_command = o.usb_command.with_host_controller_reset(false);
o
});
self.operational
.update_command(|cmd| cmd.with_host_controller_reset(true));
let mut command: registers::UsbCommand = self.operational.command();
let mut command: registers::UsbCommand = self.operational.read_command();
while command.host_controller_reset() {
// TODO: Sleep for how long?
mammoth::syscall::thread_sleep(50).unwrap();
command = self.operational.command();
command = self.operational.read_command();
}
#[cfg(feature = "debug")]
mammoth::debug!("XHCI Controller Reset, waiting ready.");
let mut status = self.operational.status();
let mut status = self.operational.read_status();
while status.controller_not_ready() {
// TODO: Sleep for how long?
mammoth::syscall::thread_sleep(50).unwrap();
status = self.operational.status();
status = self.operational.read_status();
}
#[cfg(feature = "debug")]
@ -153,34 +136,29 @@ impl XHCIDriver {
#[cfg(feature = "debug")]
mammoth::debug!("Setting Command Ring");
self.operational.update(|mut o| {
// TODO: Split this struct to make it clearer that we are setting the cycle bit here.
o.command_ring_control = (self.command_ring.lock().physical_base_address() | 1) as u64;
o
});
self.operational.set_command_ring_dequeue_pointer(
self.command_ring.lock().trb_ring.physical_base_address(),
true,
);
#[cfg(feature = "debug")]
mammoth::debug!("Setting DCBA.");
let params1 = self.capabilities.params_1;
self.operational.update(|mut o| {
o.device_context_base_address_array_pointer =
self.device_context_base_array.physical_addr() as u64;
// We tell the controller that we can support as many slots as it does because
// we allocate a full 4K page to the DCBA, which is 256 entries and the max
// slots are 255.
o.configure = o
.configure
.with_max_device_slots_enabled(params1.max_device_slots());
o
self.operational
.set_device_context_base_address_array_pointer(
self.device_slot_manager
.lock()
.device_context_base_array_physical_address(),
);
// We tell the controller that we can support as many slots as it advertises
// because the DCBA is sized for max_slots + 1 entries.
self.operational.update_configure(|cfg| {
cfg.with_max_device_slots_enabled(self.capabilities.params_1.max_device_slots())
});
let params2 = self.capabilities.params_2;
let max_scratchpad_buffers =
(params2.max_scratchpad_buffers_hi() << 5) | params2.max_scratchpad_buffers_lo();
assert!(
max_scratchpad_buffers == 0,
self.capabilities.params_2.max_scratchpad_buffers() == 0,
"Unsupported scratchpad buffers."
);
@ -211,13 +189,8 @@ impl XHCIDriver {
registers::InterrupterManagement::new().with_interrupt_enabled(true)
);
self.operational.update(|mut o| {
o.usb_command = o
.usb_command
.with_run_stop(true)
.with_interrupter_enable(true);
o
});
self.operational
.update_command(|cmd| cmd.with_run_stop(true).with_interrupter_enable(true));
#[cfg(feature = "debug")]
mammoth::debug!("Enabled interrupts and controller.");
@ -236,7 +209,6 @@ impl XHCIDriver {
let mut event_ring = self.event_ring.lock();
while let Some(trb) = event_ring.get_next() {
event_ring.update_dequeue_pointer(&mut self.interrupters()[0]);
match trb.trb_type() {
TrbType::TransferEvent => {
todo!("Handle Transfer")
@ -244,7 +216,8 @@ impl XHCIDriver {
TrbType::CommandCompletionEvent => {
self.command_ring
.lock()
.handle_commpletion(TrbCommandCompletion::from_trb(trb));
.trb_ring
.handle_completion(TrbCommandCompletion::from_trb(trb));
}
TrbType::PortStatusChangeEvent => {
let trb = TrbPortStatusChangeEvent::from_trb(trb);
@ -258,13 +231,14 @@ impl XHCIDriver {
}
}
}
event_ring.update_dequeue_pointer(&mut self.interrupters()[0]);
}
}
async fn send_command(&self, trb: impl TypedTrb) -> TrbCommandCompletion {
let fut = self.command_ring.lock().enqueue_trb(trb.to_trb());
self.doorbells()[0].ring_command();
fut.await
// Split the future and the await so the lock is dropped before we await.
let future = { self.command_ring.lock().enqueue_command(trb) };
future.await
}
pub async fn startup(&self) {
@ -309,16 +283,16 @@ impl XHCIDriver {
.status_and_control;
#[cfg(feature = "debug")]
mammoth::debug!(
"Port status change for port {}, status= {:?}",
port_id,
port_status
);
mammoth::debug!("Port status change for port {}", port_id);
if !port_status.port_reset_change() {
mammoth::debug!("Unknown port status event, not handling.");
mammoth::debug!(
"Unknown port status event, not handling. status= {:?}",
port_status
);
return;
}
self.operational
.update_port_status(port_index, |s| s.clear_change_bits());
@ -327,5 +301,27 @@ impl XHCIDriver {
let resp = self.send_command(TrbEnableSlotCommand::new()).await;
assert!(resp.completion_code() == CommandCompletionCode::Success.into_bits());
let slot = resp.slot_id();
#[cfg(feature = "debug")]
mammoth::debug!("Creating slot data structures in slot {}.", slot);
let input_context = self
.device_slot_manager
.lock()
.prep_slot_for_address_device(slot, port_id);
#[cfg(feature = "debug")]
mammoth::debug!("Sending address device.");
let resp = self
.send_command(
TrbAddressDeviceCommand::new()
.with_slot_id(slot)
.with_input_context_pointer(input_context.physical_address() as u64),
)
.await;
assert!(resp.completion_code() == CommandCompletionCode::Success.into_bits());
}
}

@ -1,7 +1,7 @@
use alloc::vec::Vec;
use crate::xhci::{
data_structures::{EventRingSegmentTable, TransferRequestBlock, TrbRingSegment, TrbType},
data_structures::{EventRingSegmentTable, TransferRequestBlock, TrbRingSegment},
registers::InterrupterRegisterSet,
trb_ring::TrbPointer,
};

@ -83,7 +83,7 @@ pub struct HCSParams2 {
/// field indicates the high order 5 bits of the number of Scratchpad Buffers system software shall
/// reserve for the xHC. Refer to section 4.20 for more information.
#[bits(5, access=RO)]
pub max_scratchpad_buffers_hi: u16,
max_scratchpad_buffers_hi: u16,
/// Scratchpad Restore (SPR). Default = implementation dependent. If Max Scratchpad Buffers is >
/// 0 then this flag indicates whether the xHC uses the Scratchpad Buffers for saving state when
@ -103,7 +103,13 @@ pub struct HCSParams2 {
/// bits of the number of Scratchpad Buffers system software shall reserve for the xHC. Refer to
/// section 4.20 for more information
#[bits(5, access=RO)]
pub max_scratchpad_buffers_lo: u16,
max_scratchpad_buffers_lo: u16,
}
impl HCSParams2 {
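/// The Max Scratchpad Buffers value is split across HCSPARAMS2: the high 5 bits
/// sit above the low 5 bits (e.g. hi = 1, lo = 3 gives 35 buffers).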
pub fn max_scratchpad_buffers(&self) -> u16 {
(self.max_scratchpad_buffers_hi()) << 5 | self.max_scratchpad_buffers_lo()
}
}
#[bitfield(u32)]
@ -320,7 +326,7 @@ pub struct HCCParams2 {
/// Hence the grouping of parameters here.
///
/// These registers are located at the addresses specified in BAR0 and BAR1 in the PCI Header.
#[repr(C, packed)]
#[repr(C)]
#[derive(Copy, Clone)]
pub struct HostControllerCapabilities {
pub cap_length_and_version: HostControllerCapabilitiesLengthAndVersion,

@ -1,4 +1,8 @@
use core::ptr::NonNull;
use alloc::{boxed::Box, vec::Vec};
use bitfield_struct::bitfield;
use volatile::VolatileRef;
/// The Doorbell Array is organized as an array of up to 256 Doorbell Registers. One
/// 32-bit Doorbell Register is defined in the array for each Device Slot. System
@ -64,18 +68,41 @@ pub struct Doorbell {
db_stream_id: u16,
}
impl Doorbell {
pub fn ring(&mut self, target: u8) {
#[repr(transparent)]
pub struct DoorbellPointer(VolatileRef<'static, Doorbell>);
impl DoorbellPointer {
// Construct a new doorbell pointer.
fn new(doorbell: NonNull<Doorbell>) -> Self {
// SAFETY:
// - We know this is a valid reference.
unsafe {
core::ptr::write_volatile(
self as *mut _,
Doorbell::new().with_db_target(target).with_db_stream_id(0),
);
}
// - We allocate this memory in create_command_and_slots below.
Self(unsafe { VolatileRef::new(doorbell) })
}
pub fn ring_command(&mut self) {
self.ring(0)
pub fn create_command_and_slots(
doorbell_physical: usize,
max_slots: u8,
) -> (Self, Box<[Self]>) {
// Add one for the command doorbell.
let doorbell_cnt = max_slots as usize + 1;
let doorbell_array_size = size_of::<Doorbell>() * doorbell_cnt;
let doorbells: NonNull<Doorbell> = NonNull::new(
mammoth::mem::map_direct_physical_and_leak(doorbell_physical, doorbell_array_size),
)
.unwrap();
let first = DoorbellPointer::new(doorbells);
let remainder = (1..=max_slots)
.map(|offset| {
// SAFETY: We just allocated the array of this size above.
DoorbellPointer::new(unsafe { doorbells.add(offset as usize) })
})
.collect();
(first, remainder)
}
pub fn ring(&mut self, target: u8) {
self.0
.as_mut_ptr()
.write(Doorbell::new().with_db_target(target))
}
}
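
A minimal sketch of how the command/slot doorbell split is consumed (it mirrors the driver changes elsewhere in this compare; illustrative only):

// Doorbell 0 is the command doorbell; doorbells 1..=max_slots belong to device slots.
let (command_doorbell, slot_doorbells) =
DoorbellPointer::create_command_and_slots(doorbell_physical, max_slots);
let command_ring = CommandRing::new(command_doorbell);
let slot_manager = DeviceSlotManager::new(max_slots, slot_doorbells);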

@ -250,7 +250,68 @@ pub struct UsbStatus {
__: u32,
}
#[bitfield(u32)]
impl UsbStatus {
// Returns a copy of this object that can be written back without unintentionally clearing flags that are RW1C (write-1-to-clear).
fn preserving_flags(&self) -> UsbStatus {
self.with_host_system_error(false)
.with_event_interrupt(false)
.with_port_change_detect(false)
.with_save_restore_error(false)
}
}
/// Internal data structure to ensure 64 bit reads and writes.
#[bitfield(u64)]
struct CommandAndStatus {
#[bits(32)]
usb_command: UsbCommand,
#[bits(32)]
usb_status: UsbStatus,
}
impl CommandAndStatus {
fn update_command(&self, f: impl Fn(UsbCommand) -> UsbCommand) -> CommandAndStatus {
CommandAndStatus::new()
.with_usb_command(f(self.usb_command()))
.with_usb_status(self.usb_status().preserving_flags())
}
fn update_status(&self, f: impl Fn(UsbStatus) -> UsbStatus) -> CommandAndStatus {
self.with_usb_status(f(self.usb_status()).preserving_flags())
}
}
#[bitfield(u64)]
struct PageSize {
/// Page Size RO. Default = Implementation defined. This field defines the page size supported by
/// the xHC implementation. This xHC supports a page size of 2^(n+12) if bit n is Set. For example, if
/// bit 0 is Set, the xHC supports 4k byte page sizes.
///
/// For a Virtual Function, this register reflects the page size selected in the System Page Size field
/// of the SR-IOV Extended Capability structure. For the Physical Function 0, this register reflects
/// the implementation dependent default xHC page size.
///
/// Various xHC resources reference PAGESIZE to describe their minimum alignment requirements.
///
/// The maximum possible page size is 128M.
#[bits(access=RO)]
page_size: u32,
__: u32,
}
#[bitfield(u64)]
struct DeviceNotificationControl {
__: u32,
/// This register is used by software to enable or disable the reporting of the
/// reception of specific USB Device Notification Transaction Packets. A Notification
/// Enable (Nx, where x = 0 to 15) flag is defined for each of the 16 possible device
/// notification types. If a flag is set for a specific notification type, a Device
/// Notification Event shall be generated when the respective notification packet is
/// received. After reset all notifications are disabled. Refer to section 6.4.2.7
device_notification_control: u32,
}
#[bitfield(u64)]
pub struct UsbConfigure {
/// Max Device Slots Enabled (MaxSlotsEn) RW. Default = 0. This field specifies the maximum
/// number of enabled Device Slots. Valid values are in the range of 0 to MaxSlots. Enabled Devices
@ -273,6 +334,9 @@ pub struct UsbConfigure {
#[bits(22)]
__: u32,
// Pad to 64 bits for the purposes of reads and writes.
__: u32,
}
/// XHCI Spec Section 5.4
@ -281,44 +345,41 @@ pub struct UsbConfigure {
/// Operational Base shall be Dword aligned and is calculated by adding the value
/// of the Capability Registers Length (CAPLENGTH) register (refer to Section 5.3.1)
/// to the Capability Base address. All registers are multiples of 32 bits in length
#[repr(C, packed)]
#[repr(C)]
#[derive(Copy, Clone)]
pub struct HostControllerOperational {
pub usb_command: UsbCommand,
pub usb_status: UsbStatus,
pub page_size: u32,
__: u32,
___: u32,
pub device_notification_control: u32,
/// Bit 0: Ring Cycle State (RO)
command_and_status: CommandAndStatus,
page_size: PageSize,
device_notification_control: DeviceNotificationControl,
/// Bit 0: Ring Cycle State (RW)
/// Bit 1: Command Stop (RW1S)
/// Bit 2: Command Abort (RW1S)
/// Bit 3: Command Ring Running (RO)
pub command_ring_control: u64,
____: u64,
_____: u64,
command_ring_control: u64,
__: u64,
___: u64,
/// The Device Context Base Address Array Pointer Register identifies the base
/// address of the Device Context Base Address Array.
/// The memory structure referenced by this physical memory pointer is assumed to
/// be physically contiguous and 64-byte aligned.
pub device_context_base_address_array_pointer: u64,
pub configure: UsbConfigure,
device_context_base_address_array_pointer: u64,
configure: UsbConfigure,
}
const _: () = assert!(size_of::<HostControllerOperational>() == 0x3C);
const _: () = assert!(size_of::<HostControllerOperational>() == 0x40);
pub struct HostControllerOperationalWrapper {
// TODO: Fix alignment of this type so we can do more targeted reads and writes.
operational: Mutex<VolatileRef<'static, HostControllerOperational>>,
// TODO: This should maybe be its own structure.
ports: Vec<Mutex<VolatileRef<'static, HostControllerUsbPort>>>,
}
#[allow(dead_code)]
impl HostControllerOperationalWrapper {
pub fn new(mmio_address: usize) -> (Self, HostControllerCapabilities) {
const MAP_SIZE: usize = 0x1000;
let caps_ptr: *mut HostControllerCapabilities =
map_direct_physical_and_leak(mmio_address as u64, MAP_SIZE as u64);
map_direct_physical_and_leak(mmio_address, MAP_SIZE);
// SAFETY:
// - The pointer is valid.
@ -331,7 +392,11 @@ impl HostControllerOperationalWrapper {
.read()
};
let cap_length_and_version = capabilities.cap_length_and_version;
assert!(
capabilities.cap_params_1.supports_64_bit(),
"We only support 64 bit XHCI"
);
// TODO: I don't think we actually handle this properly.
// SAFETY: XHCI Spec says that this resides in a single page of memory which we mapped
// above.
@ -339,6 +404,7 @@ impl HostControllerOperationalWrapper {
// BAR0 Size Allocation
// If virtualization is supported, the Capability and Operational Register sets, and
// the Extended Capabilities may reside in a single page of virtual memory,
let cap_length_and_version = capabilities.cap_length_and_version;
let operational_ptr = unsafe {
(caps_ptr as *mut u8).add(cap_length_and_version.cap_length() as usize)
as *mut HostControllerOperational
@ -350,15 +416,15 @@ impl HostControllerOperationalWrapper {
let ports_addr = unsafe { (operational_ptr as *mut u8).add(PORT_OFFSET) as usize };
let ports_space = MAP_SIZE - cap_length_and_version.cap_length() as usize - PORT_OFFSET;
let max_ports_we_support = ports_space / size_of::<HostControllerUsbPort>();
let params_1 = capabilities.params_1;
let max_ports = capabilities.params_1.max_ports();
assert!(
params_1.max_ports() as usize <= max_ports_we_support,
max_ports as usize <= max_ports_we_support,
"TODO: Support more ports."
);
let mut ports = Vec::new();
let ports_addr = ports_addr as *mut HostControllerUsbPort;
for port_index in 0..params_1.max_ports() {
for port_index in 0..max_ports {
ports.push(unsafe {
Mutex::new(VolatileRef::new(
NonNull::new(ports_addr.add(port_index as usize)).unwrap(),
@ -376,16 +442,53 @@ impl HostControllerOperationalWrapper {
(operational, capabilities)
}
pub fn update(&self, f: impl Fn(HostControllerOperational) -> HostControllerOperational) {
self.operational.lock().as_mut_ptr().update(f);
pub fn read_command(&self) -> UsbCommand {
let locked = self.operational.lock();
let op = locked.as_ptr();
map_field!(op.command_and_status).read().usb_command()
}
pub fn status(&self) -> UsbStatus {
self.operational.lock().as_ptr().read().usb_status
pub fn update_command(&self, f: impl Fn(UsbCommand) -> UsbCommand) {
let mut locked = self.operational.lock();
let op = locked.as_mut_ptr();
map_field!(op.command_and_status).update(|c_and_s| c_and_s.update_command(f));
}
pub fn command(&self) -> UsbCommand {
self.operational.lock().as_ptr().read().usb_command
pub fn read_status(&self) -> UsbStatus {
let locked = self.operational.lock();
let op = locked.as_ptr();
map_field!(op.command_and_status).read().usb_status()
}
pub fn update_status(&self, f: impl Fn(UsbStatus) -> UsbStatus) {
let mut locked = self.operational.lock();
let op = locked.as_mut_ptr();
map_field!(op.command_and_status).update(|c_and_s| c_and_s.update_status(f));
}
pub fn set_device_context_base_address_array_pointer(&self, pointer: usize) {
let mut locked = self.operational.lock();
let op = locked.as_mut_ptr();
map_field!(op.device_context_base_address_array_pointer).write(pointer as u64);
}
pub fn set_command_ring_dequeue_pointer(&self, pointer: usize, cycle_bit: bool) {
// TODO: Assert that the command ring is not running here.
let mut locked = self.operational.lock();
let op = locked.as_mut_ptr();
map_field!(op.command_ring_control).write(pointer as u64 | cycle_bit as u64);
}
pub fn read_configure(&self) -> UsbConfigure {
let locked = self.operational.lock();
let op = locked.as_ptr();
map_field!(op.configure).read()
}
pub fn update_configure(&self, f: impl Fn(UsbConfigure) -> UsbConfigure) {
let mut locked = self.operational.lock();
let op = locked.as_mut_ptr();
map_field!(op.configure).update(f);
}
pub fn get_port(&self, index: usize) -> HostControllerUsbPort {

@ -3,7 +3,12 @@ use core::task::{Poll, Waker};
use alloc::{collections::vec_deque::VecDeque, sync::Arc, vec::Vec};
use mammoth::sync::Mutex;
use crate::xhci::data_structures::{TransferRequestBlock, TrbLink, TrbRingSegment, TypedTrb};
use crate::xhci::{
data_structures::{
TransferRequestBlock, TrbCommandCompletion, TrbLink, TrbRingSegment, TypedTrb,
},
registers::DoorbellPointer,
};
struct TrbFutureState<T> {
/// Physical Address for the enqueued TRB.
@ -142,7 +147,7 @@ impl<T: TypedTrb> TrbRing<T> {
}
}
pub fn handle_commpletion(&mut self, completion_trb: T) {
pub fn handle_completion(&mut self, completion_trb: T) {
let trb = completion_trb.to_trb();
let paddr = trb.parameter() as usize;
let completion = self.pending_futures.pop_front().unwrap();
@ -161,3 +166,25 @@ impl<T: TypedTrb> TrbRing<T> {
}
}
}
pub struct CommandRing {
pub trb_ring: TrbRing<TrbCommandCompletion>,
doorbell: DoorbellPointer,
}
impl CommandRing {
pub fn new(doorbell: DoorbellPointer) -> Self {
Self {
trb_ring: TrbRing::new(),
doorbell,
}
}
// We have to explicitly return the future here so the caller can drop the lock before awaiting it.
pub fn enqueue_command(&mut self, command: impl TypedTrb) -> TrbFuture<TrbCommandCompletion> {
let fut = self.trb_ring.enqueue_trb(command.to_trb());
// Command Doorbell is always 0.
self.doorbell.ring(0);
fut
}
}
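
A short sketch of the intended call pattern for CommandRing (it mirrors XHCIDriver::send_command in this compare; illustrative only): enqueue the command while holding the lock, then await the completion once the lock has been released.

// Sketch: command_ring is a Mutex<CommandRing> as in the driver.
let future = { command_ring.lock().enqueue_command(TrbNoOp::new()) };
let completion = future.await;
assert!(completion.completion_code() == CommandCompletionCode::Success.into_bits());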