Compare commits

...

1 commit

Author SHA1 Message Date
a8d97ce0b3 Add rust lint CI job.
Some checks failed
Check / Check Rust (pull_request) Failing after 21s
2025-12-14 00:53:28 -08:00
30 changed files with 182 additions and 108 deletions

47
.forgejo/workflows/ci.yml Normal file
View file

@ -0,0 +1,47 @@
name: Check
on:
push:
branches: [main]
pull_request:
branches: [main]
env:
# Disable incremental compilation: CI never reuses incremental artifacts, so turning it off speeds up builds and shrinks ./target.
CARGO_INCREMENTAL: 0
# Should reduce the size of ./target to improve cache load/store.
CARGO_PROFILE_TEST_DEBUG: 0
jobs:
check:
name: Check Rust
runs-on: docker
steps:
- name: Checkout Repo
uses: actions/checkout@v4
- name: Install Rust
uses: https://codeberg.org/wackbyte/rust-toolchain@trunk
with:
toolchain: nightly-2025-10-02
targets: x86_64-unknown-none
components: rustfmt, clippy, rust-src
- name: Cache
uses: https://github.com/Swatinem/rust-cache@v2
with:
# Don't cache ~/.cargo/bin since we restore the cache after we install things there
cache-bin: "false"
workspaces: "rust"
- name: "Check Format"
run: cargo fmt --check
working-directory: rust
- name: "Build"
run: |
cargo build --locked
working-directory: rust
- name: "Lint"
run: |
rustup component add clippy
cargo clippy --locked -- -D warnings
working-directory: rust

View file

@ -1,3 +1,3 @@
[toolchain] [toolchain]
channel = "nightly-2025-10-02" channel = "nightly-2025-10-02"
components = ["rustfmt", "rust-analyzer"] components = ["rustfmt", "rust-analyzer", "clippy", "rust-src"]

View file

@ -94,7 +94,7 @@ impl Ext2Driver {
/// Updates the cached inode tables to contain the inode table for /// Updates the cached inode tables to contain the inode table for
/// a specific group. /// a specific group.
fn populate_inode_table_if_none(&mut self, block_group_num: usize) { fn populate_inode_table_if_none(&mut self, block_group_num: usize) {
if let None = self.inode_table_map[block_group_num] { if self.inode_table_map[block_group_num].is_none() {
debug!( debug!(
"Cache MISS on inode table for block_group {}", "Cache MISS on inode table for block_group {}",
block_group_num block_group_num
@ -148,17 +148,15 @@ impl Ext2Driver {
let dbl_indr_block_mem = let dbl_indr_block_mem =
MemoryRegion::from_cap(self.reader.read(block_num, 1).unwrap()).unwrap(); MemoryRegion::from_cap(self.reader.read(block_num, 1).unwrap()).unwrap();
let dbl_indr_blocks: &[u32] = dbl_indr_block_mem.slice(); let dbl_indr_blocks: &[u32] = &dbl_indr_block_mem.slice()[0..num_dbl_indr];
let mut blocks_to_read = Vec::new(); let mut blocks_to_read = Vec::new();
for i in 0..num_dbl_indr { for (i, dbl_indr_block) in dbl_indr_blocks.iter().enumerate() {
let num_blocks_in_single = min(num_blocks - (256 * i), 256); let num_blocks_in_single = min(num_blocks - (256 * i), 256);
blocks_to_read.append( blocks_to_read.append(
&mut self.get_blocks_from_single_indirect( &mut self
dbl_indr_blocks[i] as u64, .get_blocks_from_single_indirect(*dbl_indr_block as u64, num_blocks_in_single),
num_blocks_in_single,
),
); );
} }
@ -176,7 +174,7 @@ impl Ext2Driver {
let mut blocks = Vec::new(); let mut blocks = Vec::new();
while let Some(block) = iter.next() { for block in iter {
if block as u64 == (curr_block.lba + curr_block.size) { if block as u64 == (curr_block.lba + curr_block.size) {
curr_block.size += 1; curr_block.size += 1;
} else { } else {

View file

@ -111,7 +111,9 @@ pub struct Inode {
const _: () = assert!(size_of::<Inode>() == 128); const _: () = assert!(size_of::<Inode>() == 128);
#[allow(dead_code)]
pub const EXT2_FT_FILE: u8 = 0x1; pub const EXT2_FT_FILE: u8 = 0x1;
#[allow(dead_code)]
pub const EXT2_FT_DIR: u8 = 0x2; pub const EXT2_FT_DIR: u8 = 0x2;
#[repr(C, packed)] #[repr(C, packed)]

View file

@ -2,7 +2,6 @@ use core::ffi::c_void;
use crate::zion::{self, z_cap_t, ZError}; use crate::zion::{self, z_cap_t, ZError};
#[must_use]
fn syscall<T>(id: u64, req: &T) -> Result<(), ZError> { fn syscall<T>(id: u64, req: &T) -> Result<(), ZError> {
unsafe { unsafe {
let resp = zion::SysCall1(id, req as *const T as *const c_void); let resp = zion::SysCall1(id, req as *const T as *const c_void);

View file

@ -1,5 +1,4 @@
use crate::cap::Capability; use crate::cap::Capability;
use crate::debug;
use crate::init; use crate::init;
use crate::syscall; use crate::syscall;
use crate::zion::ZError; use crate::zion::ZError;
@ -237,7 +236,7 @@ fn load_program_segment(
let page_offset = prog_header.vaddr & 0xFFF; let page_offset = prog_header.vaddr & 0xFFF;
let mem_size = page_offset + prog_header.mem_size; let mem_size = page_offset + prog_header.mem_size;
let mem_object = crate::mem::MemoryRegion::new(mem_size)?; let mut mem_object = crate::mem::MemoryRegion::new(mem_size)?;
for i in mem_object.mut_slice() { for i in mem_object.mut_slice() {
*i = 0; *i = 0;

View file

@ -28,7 +28,7 @@ pub fn parse_init_port(port_cap: z_cap_t) {
let mut caps: [u64; 1] = [0]; let mut caps: [u64; 1] = [0];
let resp = syscall::port_poll(&init_port, &mut bytes, &mut caps); let resp = syscall::port_poll(&init_port, &mut bytes, &mut caps);
if let Err(_) = resp { if resp.is_err() {
break; break;
} }

View file

@ -2,21 +2,20 @@ use alloc::string::String;
use alloc::vec::Vec; use alloc::vec::Vec;
use core::fmt; use core::fmt;
#[derive(Default)]
pub struct Writer { pub struct Writer {
int_vec: Vec<u8>, int_vec: Vec<u8>,
} }
impl Writer { impl Writer {
pub fn new() -> Self { pub fn new() -> Self {
Self { Writer::default()
int_vec: Vec::new(),
}
} }
} }
impl Into<String> for Writer { impl From<Writer> for String {
fn into(self) -> String { fn from(value: Writer) -> Self {
String::from_utf8(self.int_vec).expect("Failed to convert") String::from_utf8(value.int_vec).expect("Failed to convert")
} }
} }

View file

@ -85,7 +85,7 @@ impl MemoryRegion {
} }
} }
pub fn mut_slice<T>(&self) -> &mut [T] { pub fn mut_slice<T>(&mut self) -> &mut [T] {
unsafe { unsafe {
slice::from_raw_parts_mut( slice::from_raw_parts_mut(
self.virt_addr as *mut T, self.virt_addr as *mut T,

View file

@ -50,7 +50,7 @@ impl<T> DerefMut for MutexGuard<'_, T> {
} }
} }
impl<T> Mutex<T> { impl<'a, T> Mutex<T> {
pub fn new(data: T) -> Mutex<T> { pub fn new(data: T) -> Mutex<T> {
Mutex { Mutex {
cap: syscall::mutex_create().unwrap(), cap: syscall::mutex_create().unwrap(),
@ -58,7 +58,7 @@ impl<T> Mutex<T> {
} }
} }
pub fn lock(&self) -> MutexGuard<T> { pub fn lock(&'a self) -> MutexGuard<'a, T> {
syscall::mutex_lock(&self.cap).unwrap(); syscall::mutex_lock(&self.cap).unwrap();
MutexGuard { mutex: self } MutexGuard { mutex: self }
@ -70,3 +70,12 @@ impl<T> Drop for MutexGuard<'_, T> {
syscall::mutex_release(&self.mutex.cap).unwrap(); syscall::mutex_release(&self.mutex.cap).unwrap();
} }
} }
impl<T> Default for Mutex<T>
where
T: Default,
{
fn default() -> Self {
Self::new(T::default())
}
}

View file

@ -9,7 +9,6 @@ use core::ffi::c_void;
#[cfg(feature = "hosted")] #[cfg(feature = "hosted")]
use core::panic::PanicInfo; use core::panic::PanicInfo;
#[must_use]
fn syscall<T>(id: u64, req: &T) -> Result<(), ZError> { fn syscall<T>(id: u64, req: &T) -> Result<(), ZError> {
unsafe { unsafe {
let resp = zion::SysCall1(id, req as *const T as *const c_void); let resp = zion::SysCall1(id, req as *const T as *const c_void);

View file

@ -48,7 +48,7 @@ struct TaskWaker {
} }
impl TaskWaker { impl TaskWaker {
fn new(task_id: TaskId, task_queue: Arc<Mutex<VecDeque<TaskId>>>) -> Waker { fn create_waker(task_id: TaskId, task_queue: Arc<Mutex<VecDeque<TaskId>>>) -> Waker {
Waker::from(Arc::new(TaskWaker { Waker::from(Arc::new(TaskWaker {
task_id, task_id,
task_queue, task_queue,
@ -69,6 +69,7 @@ impl Wake for TaskWaker {
} }
} }
#[derive(Default)]
pub struct Executor { pub struct Executor {
tasks: Arc<Mutex<BTreeMap<TaskId, Task>>>, tasks: Arc<Mutex<BTreeMap<TaskId, Task>>>,
// TODO: Consider a better datastructure for this. // TODO: Consider a better datastructure for this.
@ -78,11 +79,7 @@ pub struct Executor {
impl Executor { impl Executor {
pub fn new() -> Executor { pub fn new() -> Executor {
Executor { Executor::default()
tasks: Arc::new(Mutex::new(BTreeMap::new())),
task_queue: Arc::new(Mutex::new(VecDeque::new())),
waker_cache: BTreeMap::new(),
}
} }
pub fn spawn(&mut self, task: Task) { pub fn spawn(&mut self, task: Task) {
@ -100,7 +97,7 @@ impl Executor {
let waker = self let waker = self
.waker_cache .waker_cache
.entry(task_id) .entry(task_id)
.or_insert_with(|| TaskWaker::new(task_id, self.task_queue.clone())); .or_insert_with(|| TaskWaker::create_waker(task_id, self.task_queue.clone()));
let mut ctx = Context::from_waker(waker); let mut ctx = Context::from_waker(waker);
match task.poll(&mut ctx) { match task.poll(&mut ctx) {
Poll::Ready(()) => { Poll::Ready(()) => {

View file

@ -40,7 +40,7 @@ where
let raw_main = Box::into_raw(Box::new(main)); let raw_main = Box::into_raw(Box::new(main));
let proc_cap = Capability::take_copy(unsafe { crate::init::SELF_PROC_CAP }).unwrap(); let proc_cap = Capability::take_copy(unsafe { crate::init::SELF_PROC_CAP }).unwrap();
let cap = syscall::thread_create(&proc_cap).unwrap(); let cap = syscall::thread_create(&proc_cap).unwrap();
syscall::thread_start(&cap, entry_point as u64, raw_main as u64, 0).unwrap(); syscall::thread_start(&cap, entry_point as usize as u64, raw_main as u64, 0).unwrap();
JoinHandle { cap } JoinHandle { cap }
} }

View file

@ -2,8 +2,7 @@ use alloc::vec::Vec;
use mammoth::{cap::Capability, mem::MemoryRegion, syscall, zion::ZError}; use mammoth::{cap::Capability, mem::MemoryRegion, syscall, zion::ZError};
use crate::header::{ use crate::header::{
PciCapabilityPointer, PciDeviceHeader, PciHeaderType, PciMsiCapability, PciMsiControl, PciCapabilityPointer, PciDeviceHeader, PciHeaderType, PciMsiCapability, get_header_type,
get_header_type,
}; };
pub struct PciDevice { pub struct PciDevice {
@ -11,7 +10,7 @@ pub struct PciDevice {
} }
impl PciDevice { impl PciDevice {
pub fn from(mut memory_region: MemoryRegion) -> Result<Self, ZError> { pub fn from(memory_region: MemoryRegion) -> Result<Self, ZError> {
match get_header_type(&memory_region)? { match get_header_type(&memory_region)? {
PciHeaderType::Device => {} PciHeaderType::Device => {}
t => { t => {

View file

@ -7,7 +7,7 @@ use mammoth::thread;
#[allow(dead_code)] #[allow(dead_code)]
#[derive(PartialEq, Eq, PartialOrd, Ord, Copy, Clone)] #[derive(PartialEq, Eq, PartialOrd, Ord, Copy, Clone)]
enum Keycode { enum Keycode {
UnknownKeycode = 0x0, Unknown = 0x0,
A = 0x1, A = 0x1,
B = 0x2, B = 0x2,
@ -135,7 +135,7 @@ impl Keycode {
0x37 => Keycode::Period, 0x37 => Keycode::Period,
0x38 => Keycode::FSlash, 0x38 => Keycode::FSlash,
0x39 => Keycode::Esc, 0x39 => Keycode::Esc,
_ => Keycode::UnknownKeycode, _ => Keycode::Unknown,
} }
} }
} }

View file

@ -10,10 +10,12 @@ static mut YELLOWSTONE_INIT: Option<YellowstoneClient> = None;
pub fn from_init_endpoint() -> &'static mut YellowstoneClient { pub fn from_init_endpoint() -> &'static mut YellowstoneClient {
unsafe { unsafe {
if let None = YELLOWSTONE_INIT { #[allow(static_mut_refs)]
if YELLOWSTONE_INIT.is_none() {
YELLOWSTONE_INIT = Some(YellowstoneClient::new(Capability::take(INIT_ENDPOINT))); YELLOWSTONE_INIT = Some(YellowstoneClient::new(Capability::take(INIT_ENDPOINT)));
} }
#[allow(static_mut_refs)]
YELLOWSTONE_INIT.as_mut().unwrap() YELLOWSTONE_INIT.as_mut().unwrap()
} }
} }

View file

@ -12,7 +12,11 @@ mod tests {
#[test] #[test]
fn basic_serialization() -> Result<(), ZError> { fn basic_serialization() -> Result<(), ZError> {
let basic = Basic { unsigned_int: 82, signed_int: -1234, strn: "abc".to_string() }; let basic = Basic {
unsigned_int: 82,
signed_int: -1234,
strn: "abc".to_string(),
};
let mut buf = ByteBuffer::<1024>::new(); let mut buf = ByteBuffer::<1024>::new();
let mut caps = Vec::new(); let mut caps = Vec::new();
@ -27,7 +31,11 @@ mod tests {
#[test] #[test]
fn basic_serialization_as_request() -> Result<(), ZError> { fn basic_serialization_as_request() -> Result<(), ZError> {
let basic = Basic { unsigned_int: 82, signed_int: -1234, strn: "abc".to_string() }; let basic = Basic {
unsigned_int: 82,
signed_int: -1234,
strn: "abc".to_string(),
};
let mut buf = ByteBuffer::<1024>::new(); let mut buf = ByteBuffer::<1024>::new();
let mut caps = Vec::new(); let mut caps = Vec::new();
@ -63,7 +71,9 @@ mod tests {
#[test] #[test]
fn repeated_serialization() -> Result<(), ZError> { fn repeated_serialization() -> Result<(), ZError> {
let rep = Repeated { unsigned_ints: vec![0, 1, 3],}; let rep = Repeated {
unsigned_ints: vec![0, 1, 3],
};
let mut buf = ByteBuffer::<1024>::new(); let mut buf = ByteBuffer::<1024>::new();
let mut caps = Vec::new(); let mut caps = Vec::new();
@ -79,9 +89,13 @@ mod tests {
#[test] #[test]
fn nested_serialization() -> Result<(), ZError> { fn nested_serialization() -> Result<(), ZError> {
let nested = Nested { let nested = Nested {
basic: Basic { unsigned_int: 82, signed_int: -1234, strn: "abc".to_string() }, basic: Basic {
cap1: Cap { cap: 37}, unsigned_int: 82,
cap2: Cap { cap: 39}, signed_int: -1234,
strn: "abc".to_string(),
},
cap1: Cap { cap: 37 },
cap2: Cap { cap: 39 },
}; };
let mut buf = ByteBuffer::<1024>::new(); let mut buf = ByteBuffer::<1024>::new();
@ -90,7 +104,6 @@ mod tests {
let parsed = Nested::parse(&buf, 0, &caps)?; let parsed = Nested::parse(&buf, 0, &caps)?;
assert!(parsed == nested); assert!(parsed == nested);
Ok(()) Ok(())
@ -99,9 +112,19 @@ mod tests {
#[test] #[test]
fn repeated_nested_serialization() -> Result<(), ZError> { fn repeated_nested_serialization() -> Result<(), ZError> {
let nested = RepeatedNested { let nested = RepeatedNested {
basics: vec![Basic { unsigned_int: 82, signed_int: -1234, strn: "abc".to_string(),}, basics: vec![
Basic { unsigned_int: 21, signed_int: -8, strn: "def".to_string(), },], Basic {
caps: vec![Cap{ cap: 123}, Cap {cap: 12343}], unsigned_int: 82,
signed_int: -1234,
strn: "abc".to_string(),
},
Basic {
unsigned_int: 21,
signed_int: -8,
strn: "def".to_string(),
},
],
caps: vec![Cap { cap: 123 }, Cap { cap: 12343 }],
}; };
let mut buf = ByteBuffer::<1024>::new(); let mut buf = ByteBuffer::<1024>::new();
@ -110,12 +133,8 @@ mod tests {
let parsed = RepeatedNested::parse(&buf, 0, &caps)?; let parsed = RepeatedNested::parse(&buf, 0, &caps)?;
assert!(parsed == nested); assert!(parsed == nested);
Ok(()) Ok(())
} }
} }

View file

@ -5,12 +5,19 @@ pub struct ByteBuffer<const N: usize> {
buffer: Box<[u8; N]>, buffer: Box<[u8; N]>,
} }
impl<const N: usize> ByteBuffer<N> { impl<const N: usize> Default for ByteBuffer<N> {
pub fn new() -> Self { fn default() -> Self {
Self { Self {
buffer: Box::new([0; N]), buffer: Box::new([0; N]),
} }
} }
}
impl<const N: usize> ByteBuffer<N> {
pub fn new() -> Self {
ByteBuffer::default()
}
pub fn size(&self) -> u64 { pub fn size(&self) -> u64 {
N as u64 N as u64
} }
@ -54,7 +61,7 @@ impl<const N: usize> ByteBuffer<N> {
if (len + offset) > N { if (len + offset) > N {
return Err(ZError::BUFFER_SIZE); return Err(ZError::BUFFER_SIZE);
} }
Ok(alloc::str::from_utf8(&self.buffer[offset..offset + len]) alloc::str::from_utf8(&self.buffer[offset..offset + len])
.map_err(|_| ZError::INVALID_ARGUMENT)?) .map_err(|_| ZError::INVALID_ARGUMENT)
} }
} }

View file

@ -34,5 +34,5 @@ pub fn call_endpoint<Req: YunqMessage, Resp: YunqMessage, const N: usize>(
return Err(ZError::from(resp_code)); return Err(ZError::from(resp_code));
} }
Ok(Resp::parse_from_request(&byte_buffer, &cap_buffer)?) Resp::parse_from_request(byte_buffer, &cap_buffer)
} }

View file

@ -28,7 +28,7 @@ pub fn parse_repeated_message<T: YunqMessage, const N: usize>(
buf: &ByteBuffer<N>, buf: &ByteBuffer<N>,
mut offset: usize, mut offset: usize,
len: usize, len: usize,
caps: &Vec<z_cap_t>, caps: &[z_cap_t],
) -> Result<Vec<T>, ZError> { ) -> Result<Vec<T>, ZError> {
let mut repeated = Vec::new(); let mut repeated = Vec::new();
for _ in 0..len { for _ in 0..len {
@ -43,18 +43,18 @@ pub fn parse_repeated_message<T: YunqMessage, const N: usize>(
pub fn serialize_repeated<T: Copy, const N: usize>( pub fn serialize_repeated<T: Copy, const N: usize>(
buf: &mut ByteBuffer<N>, buf: &mut ByteBuffer<N>,
offset: usize, offset: usize,
data: &Vec<T>, data: &[T],
) -> Result<usize, ZError> { ) -> Result<usize, ZError> {
for i in 0..data.len() { for (i, val) in data.iter().enumerate() {
buf.write_at(offset + (i * size_of::<T>()), data[i])?; buf.write_at(offset + (i * size_of::<T>()), val)?;
} }
Ok(offset + (data.len() * size_of::<T>())) Ok(offset + size_of_val(data))
} }
pub fn serialize_repeated_message<T: YunqMessage, const N: usize>( pub fn serialize_repeated_message<T: YunqMessage, const N: usize>(
buf: &mut ByteBuffer<N>, buf: &mut ByteBuffer<N>,
mut offset: usize, mut offset: usize,
data: &Vec<T>, data: &[T],
caps: &mut Vec<z_cap_t>, caps: &mut Vec<z_cap_t>,
) -> Result<usize, ZError> { ) -> Result<usize, ZError> {
for item in data { for item in data {
@ -76,14 +76,14 @@ pub trait YunqMessage {
fn parse<const N: usize>( fn parse<const N: usize>(
buf: &ByteBuffer<N>, buf: &ByteBuffer<N>,
offset: usize, offset: usize,
caps: &Vec<z_cap_t>, caps: &[z_cap_t],
) -> Result<Self, ZError> ) -> Result<Self, ZError>
where where
Self: Sized; Self: Sized;
fn parse_from_request<const N: usize>( fn parse_from_request<const N: usize>(
buf: &ByteBuffer<N>, buf: &ByteBuffer<N>,
caps: &Vec<z_cap_t>, caps: &[z_cap_t],
) -> Result<Self, ZError> ) -> Result<Self, ZError>
where where
Self: Sized, Self: Sized,
@ -92,7 +92,7 @@ pub trait YunqMessage {
return Err(ZError::INVALID_RESPONSE); return Err(ZError::INVALID_RESPONSE);
} }
Ok(Self::parse(&buf, 16, &caps)?) Self::parse(buf, 16, caps)
} }
fn serialize<const N: usize>( fn serialize<const N: usize>(
@ -109,7 +109,7 @@ pub trait YunqMessage {
caps: &mut Vec<z_cap_t>, caps: &mut Vec<z_cap_t>,
) -> Result<usize, ZError> { ) -> Result<usize, ZError> {
buf.write_at(0, SENTINEL)?; buf.write_at(0, SENTINEL)?;
buf.write_at(8, request_id as u64)?; buf.write_at(8, request_id)?;
let length = self.serialize(buf, 16, caps)?; let length = self.serialize(buf, 16, caps)?;
@ -125,7 +125,7 @@ impl YunqMessage for Empty {
fn parse<const N: usize>( fn parse<const N: usize>(
_buf: &ByteBuffer<N>, _buf: &ByteBuffer<N>,
_offset: usize, _offset: usize,
_caps: &Vec<z_cap_t>, _caps: &[z_cap_t],
) -> Result<Self, ZError> ) -> Result<Self, ZError>
where where
Self: Sized, Self: Sized,

View file

@ -37,7 +37,7 @@ pub trait YunqServer {
.expect("Failed to reply"), .expect("Failed to reply"),
Err(err) => { Err(err) => {
crate::message::serialize_error(&mut byte_buffer, err); crate::message::serialize_error(&mut byte_buffer, err);
syscall::reply_port_send(reply_port_cap, &byte_buffer.slice(0x10), &[]) syscall::reply_port_send(reply_port_cap, byte_buffer.slice(0x10), &[])
.expect("Failed to reply w/ error") .expect("Failed to reply w/ error")
} }
} }
@ -84,11 +84,11 @@ where
.at::<u64>(8) .at::<u64>(8)
.expect("Failed to access request length."); .expect("Failed to access request length.");
let self_clone = self.clone(); let self_clone = self.clone();
spawner.spawn(Task::new((async move || { spawner.spawn(Task::new(async move {
self_clone self_clone
.handle_request_and_response(method, byte_buffer, cap_buffer, reply_port_cap) .handle_request_and_response(method, byte_buffer, cap_buffer, reply_port_cap)
.await; .await
})())); }));
} }
} }
@ -113,12 +113,10 @@ where
.expect("Failed to reply"), .expect("Failed to reply"),
Err(err) => { Err(err) => {
crate::message::serialize_error(&mut byte_buffer, err); crate::message::serialize_error(&mut byte_buffer, err);
syscall::reply_port_send(reply_port_cap, &byte_buffer.slice(0x10), &[]) syscall::reply_port_send(reply_port_cap, byte_buffer.slice(0x10), &[])
.expect("Failed to reply w/ error") .expect("Failed to reply w/ error")
} }
} }
()
} }
} }

View file

@ -95,7 +95,7 @@ impl Command {
command: SataCommand::DmaReadExt, command: SataCommand::DmaReadExt,
lba, lba,
sector_cnt: lba_count, sector_cnt: lba_count,
paddr: paddr, paddr,
memory_region: None, memory_region: None,
} }
} }

View file

@ -44,9 +44,8 @@ impl PortController {
}; };
// This leaves space for 8 prdt entries. // This leaves space for 8 prdt entries.
for i in 0..32 { for (i, header) in command_list.iter_mut().enumerate() {
command_list[i].command_table_base_addr = header.command_table_base_addr = (command_paddr + 0x500) + (0x100 * (i as u64));
(command_paddr + 0x500) + (0x100 * (i as u64));
} }
let command_slots = array::from_fn(|_| Arc::new(Mutex::new(CommandStatus::Empty))); let command_slots = array::from_fn(|_| Arc::new(Mutex::new(CommandStatus::Empty)));

View file

@ -16,15 +16,16 @@ impl Framebuffer {
}) })
} }
fn draw_pixel(&self, row: u32, col: u32, pixel: u32) { fn draw_pixel(&mut self, row: u32, col: u32, pixel: u32) {
let index = row * (self.fb_info.pitch as u32 / 4) + col; let index = row * (self.fb_info.pitch as u32 / 4) + col;
self.memory_region.mut_slice()[index as usize] = pixel; self.memory_region.mut_slice()[index as usize] = pixel;
} }
pub fn draw_glyph(&self, glyph: &[u8], row: u32, col: u32) { pub fn draw_glyph(&mut self, glyph: &[u8], row: u32, col: u32) {
let gl_width = 8; let gl_width = 8;
let gl_height = 16; let gl_height = 16;
#[allow(clippy::needless_range_loop)]
for r in 0..gl_height { for r in 0..gl_height {
for c in 0..gl_width { for c in 0..gl_width {
if ((glyph[r] >> c) % 2) == 1 { if ((glyph[r] >> c) % 2) == 1 {

View file

@ -23,7 +23,7 @@ pub struct Psf {
impl Psf { impl Psf {
pub fn new(path: &str) -> Result<Self, ZError> { pub fn new(path: &str) -> Result<Self, ZError> {
let file = File::open(&path)?; let file = File::open(path)?;
let header = file.slice()[0..core::mem::size_of::<PsfHeader>()] let header = file.slice()[0..core::mem::size_of::<PsfHeader>()]
.as_ptr() .as_ptr()

View file

@ -62,10 +62,8 @@ impl Terminal {
} }
fn write_line(&mut self, line: &str) { fn write_line(&mut self, line: &str) {
let mut col = 0; for (col, c) in line.chars().enumerate() {
for c in line.chars() { self.console.write_char(c, self.row, col as u32);
self.console.write_char(c, self.row, col);
col += 1;
} }
self.row += 1 self.row += 1

View file

@ -12,7 +12,8 @@ static mut VFS_CLIENT: Option<VFSClient> = None;
fn get_client() -> &'static mut VFSClient { fn get_client() -> &'static mut VFSClient {
unsafe { unsafe {
if let None = VFS_CLIENT { #[allow(static_mut_refs)]
if VFS_CLIENT.is_none() {
let endpoint_cap = yellowstone_yunq::from_init_endpoint() let endpoint_cap = yellowstone_yunq::from_init_endpoint()
.get_endpoint(&yellowstone_yunq::GetEndpointRequest { .get_endpoint(&yellowstone_yunq::GetEndpointRequest {
endpoint_name: "victoriafalls".to_string(), endpoint_name: "victoriafalls".to_string(),
@ -21,6 +22,7 @@ fn get_client() -> &'static mut VFSClient {
VFS_CLIENT = Some(VFSClient::new(Capability::take(endpoint_cap.endpoint))); VFS_CLIENT = Some(VFSClient::new(Capability::take(endpoint_cap.endpoint)));
} }
#[allow(static_mut_refs)]
VFS_CLIENT.as_mut().unwrap() VFS_CLIENT.as_mut().unwrap()
} }
} }

View file

@ -35,7 +35,7 @@ impl VFSServerHandler for VictoriaFallsServerImpl {
let mut inode_num = 2; // Start with root. let mut inode_num = 2; // Start with root.
while let Some(path_token) = tokens.next() { for path_token in tokens {
inode_num = self.find_path_in_dir(inode_num, path_token)?; inode_num = self.find_path_in_dir(inode_num, path_token)?;
} }
@ -57,7 +57,7 @@ impl VFSServerHandler for VictoriaFallsServerImpl {
let mut inode_num = 2; // Start with root. let mut inode_num = 2; // Start with root.
while let Some(path_token) = tokens.next() { for path_token in tokens {
inode_num = self.find_path_in_dir(inode_num, path_token)?; inode_num = self.find_path_in_dir(inode_num, path_token)?;
} }

View file

@ -57,9 +57,8 @@ impl YellowstoneServerContext {
pub fn wait(&self, service: &str) -> Result<(), ZError> { pub fn wait(&self, service: &str) -> Result<(), ZError> {
loop { loop {
match self.service_map.lock().get(service) { if self.service_map.lock().get(service).is_some() {
Some(_) => return Ok(()), return Ok(());
None => {}
} }
self.registration_semaphore.wait().unwrap(); self.registration_semaphore.wait().unwrap();
} }

View file

@ -122,7 +122,7 @@ fn parse_field(field: &Field) -> TokenStream {
let rep_offset = buf.at::<u32>(yunq::message::field_offset(offset, #ind))?; let rep_offset = buf.at::<u32>(yunq::message::field_offset(offset, #ind))?;
let rep_len = buf.at::<u32>(yunq::message::field_offset(offset, #ind) + 4)?; let rep_len = buf.at::<u32>(yunq::message::field_offset(offset, #ind) + 4)?;
yunq::message::parse_repeated_message(buf, offset + rep_offset as usize, rep_len as usize, &caps)? yunq::message::parse_repeated_message(buf, offset + rep_offset as usize, rep_len as usize, caps)?
}; };
}, },
} }
@ -174,7 +174,7 @@ fn generate_serialize(message: &Message) -> TokenStream {
&self, &self,
buf: &mut yunq::ByteBuffer<N>, buf: &mut yunq::ByteBuffer<N>,
offset: usize, offset: usize,
caps: &mut alloc::vec::Vec<z_cap_t>, caps: &mut Vec<z_cap_t>,
) -> Result<usize, ZError> { ) -> Result<usize, ZError> {
let num_fields = #num_fields; let num_fields = #num_fields;
let core_size: u32 = (yunq::message::MESSAGE_HEADER_SIZE + 8 * num_fields) as u32; let core_size: u32 = (yunq::message::MESSAGE_HEADER_SIZE + 8 * num_fields) as u32;
@ -183,10 +183,10 @@ fn generate_serialize(message: &Message) -> TokenStream {
#(#serializers)* #(#serializers)*
buf.write_at(offset + 0, yunq::message::MESSAGE_IDENT)?; buf.write_at(offset, yunq::message::MESSAGE_IDENT)?;
buf.write_at(offset + 4, core_size)?; buf.write_at(offset + 4, core_size)?;
buf.write_at(offset + 8, next_extension)?; buf.write_at(offset + 8, next_extension)?;
buf.write_at(offset + 12, 0 as u32)?; buf.write_at(offset + 12, 0_u32)?;
Ok(next_extension as usize) Ok(next_extension as usize)
} }
} }
@ -200,12 +200,12 @@ fn generate_parse(message: &Message) -> TokenStream {
fn parse<const N: usize>( fn parse<const N: usize>(
buf: &yunq::ByteBuffer<N>, buf: &yunq::ByteBuffer<N>,
offset: usize, offset: usize,
caps: &alloc::vec::Vec<z_cap_t>, caps: &[z_cap_t],
) -> Result<Self, ZError> ) -> Result<Self, ZError>
where where
Self: Sized, Self: Sized,
{ {
if buf.at::<u32>(offset + 0)? != yunq::message::MESSAGE_IDENT { if buf.at::<u32>(offset)? != yunq::message::MESSAGE_IDENT {
mammoth::debug!("Expected IDENT at offest {:#x}, got {:#x}", offset, buf.at::<u32>(offset)?); mammoth::debug!("Expected IDENT at offest {:#x}, got {:#x}", offset, buf.at::<u32>(offset)?);
return Err(ZError::INVALID_ARGUMENT); return Err(ZError::INVALID_ARGUMENT);
} }
@ -303,7 +303,7 @@ fn generate_server_case(method: &Method) -> TokenStream {
#id => { #id => {
let req = #req::parse_from_request(byte_buffer, cap_buffer)?; let req = #req::parse_from_request(byte_buffer, cap_buffer)?;
let resp = self.handler.#name(req)?; let resp = self.handler.#name(req)?;
cap_buffer.resize(0, 0); cap_buffer.clear();
let resp_len = resp.serialize_as_request(0, byte_buffer, cap_buffer)?; let resp_len = resp.serialize_as_request(0, byte_buffer, cap_buffer)?;
Ok(resp_len) Ok(resp_len)
}, },
@ -312,7 +312,7 @@ fn generate_server_case(method: &Method) -> TokenStream {
#id => { #id => {
let req = #req::parse_from_request(byte_buffer, cap_buffer)?; let req = #req::parse_from_request(byte_buffer, cap_buffer)?;
self.handler.#name(req)?; self.handler.#name(req)?;
cap_buffer.resize(0, 0); cap_buffer.clear();
// TODO: Implement serialization for EmptyMessage so this is less hacky. // TODO: Implement serialization for EmptyMessage so this is less hacky.
yunq::message::serialize_error(byte_buffer, ZError::from(0)); yunq::message::serialize_error(byte_buffer, ZError::from(0));
Ok(0x10) Ok(0x10)
@ -321,7 +321,7 @@ fn generate_server_case(method: &Method) -> TokenStream {
(None, Some(_)) => quote! { (None, Some(_)) => quote! {
#id => { #id => {
let resp = self.handler.#name()?; let resp = self.handler.#name()?;
cap_buffer.resize(0, 0); cap_buffer.clear();
let resp_len = resp.serialize_as_request(0, byte_buffer, cap_buffer)?; let resp_len = resp.serialize_as_request(0, byte_buffer, cap_buffer)?;
Ok(resp_len) Ok(resp_len)
}, },
@ -403,7 +403,7 @@ fn generate_async_server_case(method: &Method) -> TokenStream {
#id => { #id => {
let req = #req::parse_from_request(byte_buffer, cap_buffer)?; let req = #req::parse_from_request(byte_buffer, cap_buffer)?;
let resp = self.handler.#name(req).await?; let resp = self.handler.#name(req).await?;
cap_buffer.resize(0, 0); cap_buffer.clear();
let resp_len = resp.serialize_as_request(0, byte_buffer, cap_buffer)?; let resp_len = resp.serialize_as_request(0, byte_buffer, cap_buffer)?;
Ok(resp_len) Ok(resp_len)
}, },
@ -412,7 +412,7 @@ fn generate_async_server_case(method: &Method) -> TokenStream {
#id => { #id => {
let req = #req::parse_from_request(byte_buffer, cap_buffer)?; let req = #req::parse_from_request(byte_buffer, cap_buffer)?;
self.handler.#name(req).await?; self.handler.#name(req).await?;
cap_buffer.resize(0, 0); cap_buffer.clear();
// TODO: Implement serialization for EmptyMessage so this is less hacky. // TODO: Implement serialization for EmptyMessage so this is less hacky.
yunq::message::serialize_error(byte_buffer, ZError::from(0)); yunq::message::serialize_error(byte_buffer, ZError::from(0));
Ok(0x10) Ok(0x10)
@ -421,7 +421,7 @@ fn generate_async_server_case(method: &Method) -> TokenStream {
(None, Some(_)) => quote! { (None, Some(_)) => quote! {
#id => { #id => {
let resp = self.handler.#name().await?; let resp = self.handler.#name().await?;
cap_buffer.resize(0, 0); cap_buffer.clear();
let resp_len = resp.serialize_as_request(0, byte_buffer, cap_buffer)?; let resp_len = resp.serialize_as_request(0, byte_buffer, cap_buffer)?;
Ok(resp_len) Ok(resp_len)
}, },
@ -547,6 +547,7 @@ pub fn generate_code(ast: &[Decl]) -> String {
use alloc::vec::Vec; use alloc::vec::Vec;
use mammoth::zion::z_cap_t; use mammoth::zion::z_cap_t;
use mammoth::zion::ZError; use mammoth::zion::ZError;
#[allow(unused_imports)]
use yunq::ByteBuffer; use yunq::ByteBuffer;
use yunq::YunqMessage; use yunq::YunqMessage;