Compare commits


No commits in common. "main" and "0.1.0" have entirely different histories.
main ... 0.1.0

270 changed files with 6259 additions and 11931 deletions

View file

@ -1,15 +0,0 @@
megs: 1024
ata0-master: type=disk, path=builddbg/disk.img, mode=flat, cylinders=512, heads=16, spt=50
display_library: sdl2, options="gui_debug"
boot: disk
com1: enabled=1, mode=file, dev=serial.out, baud=9600, parity=none, bits=8, stopbits=1
cpu: model=corei7_sandy_bridge_2600k
pci: enabled=1, chipset=i440bx
log: bochs.log
# TODO: Make this portable, by building bochs locally.
romimage: file=/home/drew/opt/bochs/share/bochs/BIOS-bochs-latest
vgaromimage: file=/home/drew/opt/bochs/share/bochs/VGABIOS-lgpl-latest.bin
vga: extension=vbe
port_e9_hack: enabled=1
# Breaks on XCHGW %BX, %BX
magic_break: enabled=1
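The last two options are what make interactive kernel debugging practical. A minimal sketch (not code from this tree) of how they are typically driven from C++ kernel code:

// magic_break: Bochs drops into its debugger when the guest executes xchg bx, bx.
inline void bochs_break() { asm volatile("xchgw %bx, %bx"); }
// port_e9_hack: bytes written to I/O port 0xE9 appear on the Bochs console/log.
inline void bochs_putc(char c) { asm volatile("outb %0, $0xe9" : : "a"(c)); }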

View file

@ -1,42 +0,0 @@
name: Check
on:
push:
branches: [main]
pull_request:
branches: [main]
env:
# Should speed up builds.
CARGO_INCREMENTAL: 0
# Should reduce the size of ./target to improve cache load/store.
CARGO_PROFILE_TEST_DEBUG: 0
jobs:
check:
name: Check Rust
runs-on: docker
steps:
- name: Checkout Repo
uses: actions/checkout@v4
- name: Install Rust
uses: https://codeberg.org/wackbyte/rust-toolchain@trunk
with:
toolchain: nightly-2025-10-02
components: rustfmt, clippy, rust-src
- name: Cache
uses: https://github.com/Swatinem/rust-cache@v2
with:
# Don't cache ~/.cargo/bin since we restore the cache after we install things there
cache-bin: "false"
workspaces: "backend"
- name: "Check Format"
run: cargo fmt --check
working-directory: rust
- name: "Lint"
run: |
rustup component add clippy
cargo clippy --locked -- -D warnings
working-directory: rust

.gitignore
View file

@ -1,17 +1,6 @@
 builddbg/
-test-bin/
 __pycache__/
-.ccls-cache/
 compile_commands.json
-bochs.log
-serial.out
 sysroot/bin
-sysroot/usr
+sysroot/usr/bin
-sysroot/.crates.toml
-sysroot/.crates2.json
-rust/target
-yunq/venv
-yunq/rust/target

View file

@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.5)
+cmake_minimum_required(VERSION 3.2)
 # Set because our cross compiler can't do dynamic linking?
 set(CMAKE_TRY_COMPILE_TARGET_TYPE "STATIC_LIBRARY")
@ -9,19 +9,11 @@ set(CMAKE_CXX_STANDARD 20)
 set(CMAKE_CXX_STANDARD_REQUIRED True)
 set(CMAKE_EXPORT_COMPILE_COMMANDS True)
-set(BASE_COMPILE_FLAGS "-ffreestanding -fno-rtti -fno-exceptions -mincoming-stack-boundary=3")
+set(BASE_COMPILE_FLAGS "-ffreestanding -fno-rtti -fno-exceptions")
 set(BASE_LINK_FLAGS "-nostdlib")
-if (enable_testing)
-  include(CTest)
-  find_package(Catch2 3 REQUIRED)
-  find_program(MEMORYCHECK_COMMAND valgrind)
-  set(MEMORYCHECK_COMMAND_OPTIONS "--trace-children=yes --leak-check=full")
-  add_custom_target(build_test)
-endif()
 add_subdirectory(zion)
-add_subdirectory(yunq)
 add_subdirectory(lib)
+add_subdirectory(yunq)
 add_subdirectory(usr)
 add_subdirectory(sys)

View file

@ -1,24 +1,5 @@
 # Releases
-## AcadiaOS 0.1.1 (WIP)
-### Denali
-- AHCI Driver can use more than one command slot.
-- Resets AHCI Controller on start.
-- Uses IDENTIFY DEVICE to get sector size.
-### Glacier
-- Unit Testing setup for Host Machine
-- Unit Tests for: Vector
-- Added Iterators for: Vector, Array, ArrayView
-- HashMap Move Semantics
-### Yunq
-- Moved message parsing/serialization to shared library.
 ## AcadiaOS 0.1.0 (2023-12-08)
 This marks the first release of AcadiaOS! There is very little user functionality currently but a

View file

@ -29,7 +29,3 @@ target_include_directories(glacier_kernel
 set_target_properties(glacier_kernel PROPERTIES
     COMPILE_FLAGS "${CMAKE_CXX_FLAGS} ${BASE_COMPILE_FLAGS} -mcmodel=kernel -mgeneral-regs-only")
-if (enable_testing)
-  add_subdirectory(test)
-endif()

View file

@ -2,7 +2,6 @@
 #include <stdint.h>
-#include "glacier/container/array_iter.h"
 #include "glacier/container/array_view.h"
 namespace glcr {
@ -38,13 +37,6 @@ class Array {
   uint64_t size() const { return size_; }
   bool empty() const { return size_ == 0; }
-  typedef ArrayIterator<T> Iterator;
-  Iterator begin() { return {data_, size_}; }
-  const Iterator begin() const { return {data_, size_}; }
-  Iterator end() { return {nullptr, 0}; }
-  const Iterator end() const { return {nullptr, 0}; }
  private:
   T* data_;
   uint64_t size_;

View file

@ -1,40 +0,0 @@
#pragma once
#include <stdint.h>
namespace glcr {
template <typename T>
class ArrayIterator {
public:
ArrayIterator(T* item, uint64_t size) : item_(item), size_(size) {}
ArrayIterator next() {
if (size_ <= 1) {
return {nullptr, 0};
}
return {item_ + 1, size_ - 1};
}
ArrayIterator& operator++() {
if (size_ <= 1) {
item_ = nullptr;
size_ = 0;
} else {
item_++;
size_--;
}
return *this;
}
T& operator*() { return *item_; }
T* operator->() { return item_; }
bool operator==(const ArrayIterator& other) { return item_ == other.item_; }
bool operator!=(const ArrayIterator& other) { return item_ != other.item_; }
private:
T* item_;
uint64_t size_;
};
} // namespace glcr
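For context, this removed iterator is what backs the begin()/end() pairs added to Array, ArrayView, and Vector elsewhere in this diff. A short sketch of how it is consumed (mirroring the vector test further down); the values are illustrative:

glcr::Vector<uint64_t> v;
v.PushBack(1);
v.PushBack(2);
for (uint64_t x : v) {
  // x is 1, then 2. operator++ nulls the element pointer once size_ reaches
  // zero, so the loop terminates against the {nullptr, 0} sentinel from end().
}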

View file

@ -2,8 +2,6 @@
#include <stdint.h> #include <stdint.h>
#include "glacier/container/array_iter.h"
namespace glcr { namespace glcr {
template <typename T> template <typename T>
@ -12,9 +10,7 @@ class ArrayView {
ArrayView() : data_(nullptr), size_(0) {} ArrayView() : data_(nullptr), size_(0) {}
ArrayView(const ArrayView&) = default; ArrayView(const ArrayView&) = default;
ArrayView& operator=(const ArrayView&) = default;
ArrayView(ArrayView&&) = default; ArrayView(ArrayView&&) = default;
ArrayView& operator=(ArrayView&&) = default;
ArrayView(T* data, uint64_t size) : data_(data), size_(size) {} ArrayView(T* data, uint64_t size) : data_(data), size_(size) {}
@ -28,23 +24,6 @@ class ArrayView {
uint64_t size() const { return size_; } uint64_t size() const { return size_; }
bool empty() const { return size_; } bool empty() const { return size_; }
typedef ArrayIterator<T> Iterator;
Iterator begin() {
if (size_ == 0) {
return {nullptr, 0};
}
return {data_, size_};
}
const Iterator begin() const {
if (size_ == 0) {
return {nullptr, 0};
}
return {data_, size_};
}
Iterator end() { return {nullptr, 0}; }
const Iterator end() const { return {nullptr, 0}; }
private: private:
T* data_; T* data_;
uint64_t size_; uint64_t size_;

View file

@ -18,8 +18,9 @@ class HashMap {
HashMap() = default; HashMap() = default;
HashMap(const HashMap&) = delete; HashMap(const HashMap&) = delete;
HashMap& operator=(const HashMap&) = delete; HashMap& operator=(const HashMap&) = delete;
-  HashMap(HashMap&&);
-  HashMap& operator=(HashMap&&);
+  // TODO: Implement Move.
+  HashMap(HashMap&&) = delete;
+  HashMap& operator=(HashMap&&) = delete;
// Accessors. // Accessors.
uint64_t size() { return size_; } uint64_t size() { return size_; }
@ -62,21 +63,6 @@ class HashMap {
void ResizeIfNecessary(); void ResizeIfNecessary();
}; };
template <typename K, typename V, class H>
HashMap<K, V, H>::HashMap(HashMap&& other) {
data_ = glcr::Move(other.data_);
size_ = other.size_;
other.size_ = 0;
}
template <typename K, typename V, class H>
HashMap<K, V, H>& HashMap<K, V, H>::operator=(HashMap&& other) {
data_ = glcr::Move(other.data_);
size_ = other.size_;
other.size_ = 0;
return *this;
}
template <typename K, typename V, class H> template <typename K, typename V, class H>
V& HashMap<K, V, H>::at(const K& key) { V& HashMap<K, V, H>::at(const K& key) {
uint64_t hc = H()(key); uint64_t hc = H()(key);
@ -88,8 +74,7 @@ V& HashMap<K, V, H>::at(const K& key) {
} }
} }
// TODO: Add a failure mode here instead of constructing an object. // TODO: Add a failure mode here instead of constructing an object.
-  K k2 = key;
-  ll.PushFront({glcr::Move(k2), {}});
+  ll.PushFront({key, {}});
return ll.PeekFront().second(); return ll.PeekFront().second();
} }
@ -209,7 +194,8 @@ template <typename K, typename V, class H>
void HashMap<K, V, H>::Resize(uint64_t new_size) { void HashMap<K, V, H>::Resize(uint64_t new_size) {
Array<LinkedList<Pair<K, V>>> new_data(new_size); Array<LinkedList<Pair<K, V>>> new_data(new_size);
-  for (auto& ll : data_) {
+  for (uint64_t i = 0; i < data_.size(); i++) {
+    auto& ll = data_[i];
while (!ll.empty()) { while (!ll.empty()) {
auto pair = ll.PopFront(); auto pair = ll.PopFront();
uint64_t hc = H()(pair.first()); uint64_t hc = H()(pair.first());
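The main side of this hunk gives HashMap real move semantics. A sketch of what that enables, assuming a default hasher exists for the key type (glcr::Move is used the same way elsewhere in this diff):

glcr::HashMap<uint64_t, uint64_t> a;
a.at(1) = 42;  // at() inserts a default-constructed value for a missing key
glcr::HashMap<uint64_t, uint64_t> b = glcr::Move(a);  // steals the backing Array; a is left with size() == 0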

View file

@ -20,8 +20,6 @@ class IntrusiveList {
void PushFront(const RefPtr<T>& obj); void PushFront(const RefPtr<T>& obj);
void PushBack(const RefPtr<T>& obj); void PushBack(const RefPtr<T>& obj);
void Remove(const RefPtr<T>& obj);
RefPtr<T> PopFront(); RefPtr<T> PopFront();
RefPtr<T> PopBack(); RefPtr<T> PopBack();
@ -66,29 +64,6 @@ void IntrusiveList<T>::PushBack(const RefPtr<T>& obj) {
back_ = obj; back_ = obj;
} }
template <typename T>
void IntrusiveList<T>::Remove(const RefPtr<T>& obj) {
if (!obj) {
return;
}
if (front_ == obj) {
front_ = obj->next_;
}
if (back_ == obj) {
back_ = obj->prev_;
}
if (obj->prev_) {
obj->prev_->next_ = obj->next_;
}
if (obj->next_) {
obj->next_->prev_ = obj->prev_;
}
obj->prev_ = nullptr;
obj->next_ = nullptr;
size_--;
}
template <typename T> template <typename T>
RefPtr<T> IntrusiveList<T>::PopFront() { RefPtr<T> IntrusiveList<T>::PopFront() {
if (front_ == nullptr) { if (front_ == nullptr) {

View file

@ -1,6 +1,5 @@
#pragma once #pragma once
#include <glacier/container/array_iter.h>
#include <glacier/memory/move.h> #include <glacier/memory/move.h>
#include <stdint.h> #include <stdint.h>
@ -38,7 +37,6 @@ class Vector {
// Setters. // Setters.
// FIXME: Handle downsizing. // FIXME: Handle downsizing.
// TODO: Rename this so it is clear that this only affects capacity.
void Resize(uint64_t capacity); void Resize(uint64_t capacity);
void PushBack(const T& item); void PushBack(const T& item);
@ -47,16 +45,8 @@ class Vector {
template <typename... Args> template <typename... Args>
void EmplaceBack(Args&&... args); void EmplaceBack(Args&&... args);
T& PeekBack();
T&& PopBack(); T&& PopBack();
typedef ArrayIterator<T> Iterator;
Iterator begin() { return {data_, size_}; }
const Iterator begin() const { return {data_, size_}; }
Iterator end() { return {nullptr, 0}; }
const Iterator end() const { return {nullptr, 0}; }
private: private:
T* data_; T* data_;
uint64_t size_; uint64_t size_;
@ -131,11 +121,6 @@ void Vector<T>::EmplaceBack(Args&&... args) {
data_[size_++] = T(args...); data_[size_++] = T(args...);
} }
template <typename T>
T& Vector<T>::PeekBack() {
return data_[size_ - 1];
}
template <typename T> template <typename T>
T&& Vector<T>::PopBack() { T&& Vector<T>::PopBack() {
size_--; size_--;

View file

@ -32,7 +32,7 @@ String::String(const String& other) : String(other.cstr_, other.length_) {}
String& String::operator=(const String& other) { String& String::operator=(const String& other) {
if (cstr_) { if (cstr_) {
-    delete[] cstr_;
+    delete cstr_;
} }
length_ = other.length_; length_ = other.length_;
cstr_ = new char[length_ + 1]; cstr_ = new char[length_ + 1];
@ -51,7 +51,7 @@ String::String(String&& other) : cstr_(other.cstr_), length_(other.length_) {
String& String::operator=(String&& other) { String& String::operator=(String&& other) {
if (cstr_) { if (cstr_) {
-    delete[] cstr_;
+    delete cstr_;
} }
cstr_ = other.cstr_; cstr_ = other.cstr_;
length_ = other.length_; length_ = other.length_;
@ -64,7 +64,7 @@ String& String::operator=(String&& other) {
String::~String() { String::~String() {
if (cstr_) { if (cstr_) {
-    delete[] cstr_;
+    delete cstr_;
} }
} }
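The change in this file is a correctness fix on the main side: cstr_ is allocated with new char[length_ + 1], so it must be released with delete[] rather than delete. A one-line illustration of the rule (not repo code):

char* s = new char[16];
delete[] s;   // matches the array form of new
// delete s;  // what the 0.1.0 side does: undefined behavior for array allocations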

View file

@ -1,2 +0,0 @@
add_subdirectory(container)

View file

@ -1,8 +0,0 @@
add_executable(glc_vec_test vector.cpp)
target_link_libraries(glc_vec_test glacier Catch2::Catch2WithMain)
target_include_directories(glc_vec_test PRIVATE "../..")
add_test(NAME glc_vec_test COMMAND $<TARGET_FILE:glc_vec_test>)
add_dependencies(build_test
glc_vec_test)

View file

@ -1,172 +0,0 @@
#include "container/vector.h"
#include <catch2/catch_test_macros.hpp>
using namespace glcr;
TEST_CASE("Empty Vector", "[vector]") {
Vector<uint64_t> v;
REQUIRE(v.size() == 0);
REQUIRE(v.empty());
}
TEST_CASE("Push/Pop Vector", "[vector]") {
Vector<uint64_t> v;
v.PushBack(42);
REQUIRE(v.size() == 1);
REQUIRE(v.capacity() >= 1);
v.PushBack(33);
REQUIRE(v.size() == 2);
REQUIRE(v.capacity() >= 2);
REQUIRE(v.at(0) == 42);
REQUIRE(v[0] == 42);
REQUIRE(v.at(1) == 33);
REQUIRE(v[1] == 33);
REQUIRE(v.PopBack() == 33);
REQUIRE(v.size() == 1);
REQUIRE(v.PopBack() == 42);
REQUIRE(v.size() == 0);
}
class ConstructRecorder {
public:
static uint64_t construct_cnt;
static uint64_t copy_cnt;
static uint64_t move_cnt;
ConstructRecorder() { construct_cnt++; }
ConstructRecorder(const ConstructRecorder&) { copy_cnt++; }
ConstructRecorder& operator=(const ConstructRecorder&) {
copy_cnt++;
return *this;
}
ConstructRecorder(ConstructRecorder&&) { move_cnt++; }
ConstructRecorder& operator=(ConstructRecorder&&) {
move_cnt++;
return *this;
}
static void Reset() {
construct_cnt = 0;
copy_cnt = 0;
move_cnt = 0;
}
private:
uint64_t dummy_data = 0;
};
uint64_t ConstructRecorder::construct_cnt = 0;
uint64_t ConstructRecorder::copy_cnt = 0;
uint64_t ConstructRecorder::move_cnt = 0;
TEST_CASE("Data-Type Construction", "[vector]") {
ConstructRecorder::Reset();
Vector<ConstructRecorder> v;
SECTION("Copy Insert") {
ConstructRecorder obj;
v.PushBack(obj);
// This is overfitted on the implementation which also default constructs
// the held objects when allocating a new backing array.
REQUIRE(ConstructRecorder::construct_cnt == 2);
REQUIRE(ConstructRecorder::copy_cnt == 1);
REQUIRE(ConstructRecorder::move_cnt == 0);
}
SECTION("Move Insert") {
ConstructRecorder obj;
v.PushBack(glcr::Move(obj));
// This is overfitted on the implementation which also default constructs
// the held objects when allocating a new backing array.
REQUIRE(ConstructRecorder::construct_cnt == 2);
REQUIRE(ConstructRecorder::copy_cnt == 0);
REQUIRE(ConstructRecorder::move_cnt == 1);
}
SECTION("RValue Insert") {
v.PushBack({});
// This is overfitted on the implementation which also default constructs
// the held objects when allocating a new backing array.
REQUIRE(ConstructRecorder::construct_cnt == 2);
REQUIRE(ConstructRecorder::copy_cnt == 0);
REQUIRE(ConstructRecorder::move_cnt == 1);
}
SECTION("Emplace Insert") {
v.EmplaceBack();
// This is overfitted on the implementation which also default constructs
// the held objects when allocating a new backing array.
REQUIRE(ConstructRecorder::construct_cnt == 2);
REQUIRE(ConstructRecorder::copy_cnt == 0);
REQUIRE(ConstructRecorder::move_cnt == 1);
}
SECTION("PopBack Move") {
v.EmplaceBack();
ConstructRecorder obj = v.PopBack();
// This is overfitted on the implementation which also default constructs
// the held objects when allocating a new backing array.
REQUIRE(ConstructRecorder::construct_cnt == 2);
REQUIRE(ConstructRecorder::copy_cnt == 0);
// 1 from emplace, 1 from pop. (No additional regular constructions).
REQUIRE(ConstructRecorder::move_cnt == 2);
}
}
TEST_CASE("Vector Move", "[vector]") {
ConstructRecorder::Reset();
Vector<ConstructRecorder> v;
v.PushBack({});
v.PushBack({});
v.PushBack({});
uint64_t construct = ConstructRecorder::construct_cnt;
uint64_t copy = ConstructRecorder::copy_cnt;
uint64_t move = ConstructRecorder::move_cnt;
Vector<ConstructRecorder> v2(glcr::Move(v));
REQUIRE(v2.size() == 3);
REQUIRE(v2.capacity() >= 3);
REQUIRE(ConstructRecorder::construct_cnt == construct);
REQUIRE(ConstructRecorder::copy_cnt == copy);
REQUIRE(ConstructRecorder::move_cnt == move);
Vector<ConstructRecorder> v3 = glcr::Move(v2);
REQUIRE(v3.size() == 3);
REQUIRE(v3.capacity() >= 3);
REQUIRE(ConstructRecorder::construct_cnt == construct);
REQUIRE(ConstructRecorder::copy_cnt == copy);
REQUIRE(ConstructRecorder::move_cnt == move);
}
TEST_CASE("Vector Iterator", "[vector]") {
Vector<uint64_t> v;
for (uint64_t i = 0; i < 100; i++) {
v.PushBack(42);
}
SECTION("For Range Loop") {
uint64_t iters = 0;
for (uint64_t i : v) {
REQUIRE(i == 42);
iters++;
}
REQUIRE(iters == 100);
}
SECTION("Raw Iter Loop") {
uint64_t iters = 0;
for (auto it = v.begin(); it != v.end(); ++it) {
REQUIRE(*it == 42);
iters++;
}
REQUIRE(iters == 100);
}
}

View file

@ -1,7 +1,14 @@
add_library(mammoth STATIC add_library(mammoth STATIC
file/file.cpp
input/keyboard.cpp
ipc/channel.cpp
ipc/endpoint_client.cpp
ipc/endpoint_server.cpp
ipc/port_client.cpp ipc/port_client.cpp
ipc/port_server.cpp ipc/port_server.cpp
proc/process.cpp
proc/thread.cpp proc/thread.cpp
sync/mutex.cpp
sync/semaphore.cpp sync/semaphore.cpp
util/debug.cpp util/debug.cpp
util/init.cpp util/init.cpp
@ -17,6 +24,7 @@ target_include_directories(mammoth
target_link_libraries(mammoth target_link_libraries(mammoth
glacier glacier
victoriafalls_yunq
yellowstone_yunq yellowstone_yunq
voyageurs_yunq voyageurs_yunq
zion_stub zion_stub

lib/mammoth/file/file.cpp
View file

@ -0,0 +1,81 @@
#include "file/file.h"
#include <glacier/string/str_split.h>
#include <mammoth/util/init.h>
#include <victoriafalls/victoriafalls.yunq.client.h>
#include <yellowstone/yellowstone.yunq.client.h>
#include <zcall.h>
#include "util/debug.h"
namespace mmth {
namespace {
using yellowstone::Endpoint;
using yellowstone::GetEndpointRequest;
using yellowstone::YellowstoneClient;
VFSClient* gVfsClient = nullptr;
void GetVfsClientIfNeeded() {
if (gVfsClient == nullptr) {
// TODO: Add an unowned client so we don't have to duplicate this cap every
// time.
uint64_t dup_cap;
check(ZCapDuplicate(gInitEndpointCap, kZionPerm_All, &dup_cap));
YellowstoneClient client(dup_cap);
GetEndpointRequest yreq;
yreq.set_endpoint_name("victoriafalls");
Endpoint yresp;
check(client.GetEndpoint(yreq, yresp));
gVfsClient = new VFSClient(yresp.endpoint());
}
}
} // namespace
void SetVfsCap(z_cap_t vfs_cap) { gVfsClient = new VFSClient(vfs_cap); }
File File::Open(glcr::StringView path) {
GetVfsClientIfNeeded();
OpenFileRequest req;
req.set_path(path);
OpenFileResponse resp;
check(gVfsClient->OpenFile(req, resp));
return File(OwnedMemoryRegion::FromCapability(resp.memory()), resp.size());
}
glcr::StringView File::as_str() {
return glcr::StringView((char*)raw_ptr(), size_);
}
void* File::raw_ptr() { return reinterpret_cast<void*>(file_data_.vaddr()); }
uint8_t* File::byte_ptr() {
return reinterpret_cast<uint8_t*>(file_data_.vaddr());
}
glcr::ErrorOr<glcr::Vector<glcr::String>> ListDirectory(glcr::StringView path) {
GetVfsClientIfNeeded();
GetDirectoryRequest req;
req.set_path(path);
Directory dir;
auto status = gVfsClient->GetDirectory(req, dir);
if (!status.ok()) {
dbgln("Error in getting directory: {}", status.message());
return status.code();
}
auto file_views = glcr::StrSplit(dir.filenames(), ',');
glcr::Vector<glcr::String> files;
for (uint64_t i = 0; i < file_views.size(); i++) {
files.PushBack(file_views[i]);
}
return files;
}
} // namespace mmth

lib/mammoth/file/file.h
View file

@ -0,0 +1,38 @@
#pragma once
#include <glacier/container/vector.h>
#include <glacier/memory/move.h>
#include <glacier/status/error_or.h>
#include <glacier/string/string.h>
#include <glacier/string/string_view.h>
#include "mammoth/util/memory_region.h"
namespace mmth {
// Intended for use in yellowstone since it already has the VFS cap.
void SetVfsCap(z_cap_t vfs_cap);
class File {
public:
static File Open(glcr::StringView path);
uint64_t size() { return size_; }
glcr::StringView as_str();
void* raw_ptr();
uint8_t* byte_ptr();
private:
OwnedMemoryRegion file_data_;
uint64_t size_;
File(OwnedMemoryRegion&& file, uint64_t size)
: file_data_(glcr::Move(file)), size_(size) {}
};
// TODO: Move this to a separate file.
glcr::ErrorOr<glcr::Vector<glcr::String>> ListDirectory(glcr::StringView path);
} // namespace mmth
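A sketch of the intended call pattern for this API; the path literal and error handling are illustrative, not code from this tree:

mmth::File f = mmth::File::Open("/example.txt");  // hypothetical path
glcr::StringView contents = f.as_str();
auto files_or = mmth::ListDirectory("/");
if (files_or.ok()) {
  // iterate files_or.value(), a glcr::Vector<glcr::String> of filenames
}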

View file

@ -0,0 +1,283 @@
#include "input/keyboard.h"
#include <mammoth/util/init.h>
#include <voyageurs/voyageurs.yunq.client.h>
#include <yellowstone/yellowstone.yunq.client.h>
#include "util/debug.h"
namespace mmth {
namespace {
using yellowstone::Endpoint;
using yellowstone::GetEndpointRequest;
using yellowstone::YellowstoneClient;
void KeyboardListenerEntry(void* keyboard_base) {
reinterpret_cast<KeyboardListenerBase*>(keyboard_base)->ListenLoop();
}
} // namespace
KeyboardListenerBase::KeyboardListenerBase() {
auto server_or = PortServer::Create();
if (!server_or) {
crash("Failed to create server", server_or.error());
}
server_ = server_or.value();
}
void KeyboardListenerBase::Register() {
uint64_t dup_cap;
check(ZCapDuplicate(gInitEndpointCap, kZionPerm_All, &dup_cap));
YellowstoneClient client(dup_cap);
GetEndpointRequest req;
req.set_endpoint_name("voyageurs");
Endpoint endpt;
check(client.GetEndpoint(req, endpt));
VoyageursClient vclient(endpt.endpoint());
KeyboardListener listn;
// TODO: Create a "ASSIGN_OR_CRASH" macro to simplify this.
auto client_or = server_.CreateClient();
if (!client_or.ok()) {
crash("Failed to create client", client_or.error());
}
listn.set_port_capability(client_or.value().cap());
check(vclient.RegisterKeyboardListener(listn));
}
Thread KeyboardListenerBase::Listen() {
return Thread(KeyboardListenerEntry, this);
}
void KeyboardListenerBase::ListenLoop() {
while (true) {
auto scancode_or = server_.RecvChar();
if (!scancode_or.ok()) {
check(scancode_or.error());
}
uint8_t scancode = scancode_or.value();
if (scancode == 0xE0) {
extended_on_ = true;
continue;
}
Keycode k = ScancodeToKeycode(scancode);
Action a = ScancodeToAction(scancode);
HandleKeycode(k, a);
}
}
void KeyboardListenerBase::HandleKeycode(Keycode code, Action action) {
char c = '\0';
if (action == kPressed) {
if (code >= kA && code <= kZ) {
if (IsShift()) {
const char* alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
c = alpha[code - kA];
} else {
const char* alpha = "abcdefghijklmnopqrstuvwxyz";
c = alpha[code - kA];
}
} else if (code >= k1 && code <= k0) {
if (IsShift()) {
const char* num = "!@#$%^&*()";
c = num[code - k1];
} else {
const char* num = "1234567890";
c = num[code - k1];
}
} else if (code >= kMinus && code <= kBacktick) {
if (IsShift()) {
const char* sym = "_+{}|?:\"<>~";
c = sym[code - kMinus];
} else {
const char* sym = "-=[]\\/;',.`";
c = sym[code - kMinus];
}
} else if (code == kEnter) {
c = '\n';
} else if (code == kSpace) {
c = ' ';
} else if (code == kTab) {
c = '\t';
} else if (code == kBackspace) {
c = '\b';
} else if (code == kLShift) {
lshift_ = true;
} else if (code == kRShift) {
rshift_ = true;
}
} else if (action == kReleased) {
if (code == kLShift) {
lshift_ = false;
} else if (code == kRShift) {
rshift_ = false;
}
}
if (c != '\0') {
HandleCharacter(c);
}
}
Keycode KeyboardListenerBase::ScancodeToKeycode(uint8_t scancode) {
// Cancel out the released bit.
scancode &= 0x7F;
if (extended_on_) {
extended_on_ = false;
switch (scancode) {
case 0x1D:
return kRCtrl;
case 0x38:
return kRAlt;
case 0x48:
return kUp;
case 0x4B:
return kLeft;
case 0x4D:
return kRight;
case 0x50:
return kDown;
case 0x53:
return kDelete;
case 0x5B:
return kSuper;
}
dbgln("Unknown extended scancode {x}", scancode);
return kUnknownKeycode;
}
switch (scancode) {
case 0x01:
return kEsc;
case 0x02:
return k1;
case 0x03:
return k2;
case 0x04:
return k3;
case 0x05:
return k4;
case 0x06:
return k5;
case 0x07:
return k6;
case 0x08:
return k7;
case 0x09:
return k8;
case 0x0A:
return k9;
case 0x0B:
return k0;
case 0x0C:
return kMinus;
case 0x0D:
return kEquals;
case 0x0E:
return kBackspace;
case 0x0F:
return kTab;
case 0x10:
return kQ;
case 0x11:
return kW;
case 0x12:
return kE;
case 0x13:
return kR;
case 0x14:
return kT;
case 0x15:
return kY;
case 0x16:
return kU;
case 0x17:
return kI;
case 0x18:
return kO;
case 0x19:
return kP;
case 0x1A:
return kLBrace;
case 0x1B:
return kRBrace;
case 0x1C:
return kEnter;
case 0x1D:
return kLCtrl;
case 0x1E:
return kA;
case 0x1F:
return kS;
case 0x20:
return kD;
case 0x21:
return kF;
case 0x22:
return kG;
case 0x23:
return kH;
case 0x24:
return kJ;
case 0x25:
return kK;
case 0x26:
return kL;
case 0x27:
return kSemicolon;
case 0x28:
return kQuote;
case 0x29:
return kBacktick;
case 0x2A:
return kLShift;
case 0x2B:
return kBSlash;
case 0x2C:
return kZ;
case 0x2D:
return kX;
case 0x2E:
return kC;
case 0x2F:
return kV;
case 0x30:
return kB;
case 0x31:
return kN;
case 0x32:
return kM;
case 0x33:
return kComma;
case 0x34:
return kPeriod;
case 0x35:
return kFSlash;
case 0x36:
return kRShift;
case 0x38:
return kLAlt;
case 0x39:
return kSpace;
}
dbgln("Unknown scancode {x}", scancode);
return kUnknownKeycode;
}
Action KeyboardListenerBase::ScancodeToAction(uint8_t scancode) {
return (scancode & 0x80) ? kReleased : kPressed;
}
} // namespace mmth

View file

@ -0,0 +1,123 @@
#pragma once
#include "mammoth/ipc/port_server.h"
#include "mammoth/proc/thread.h"
namespace mmth {
enum Keycode {
kUnknownKeycode = 0x0,
kA = 0x1,
kB = 0x2,
kC = 0x3,
kD = 0x4,
kE = 0x5,
kF = 0x6,
kG = 0x7,
kH = 0x8,
kI = 0x9,
kJ = 0xA,
kK = 0xB,
kL = 0xC,
kM = 0xD,
kN = 0xE,
kO = 0xF,
kP = 0x10,
kQ = 0x11,
kR = 0x12,
kS = 0x13,
kT = 0x14,
kU = 0x15,
kV = 0x16,
kW = 0x17,
kX = 0x18,
kY = 0x19,
kZ = 0x1A,
k1 = 0x20,
k2 = 0x21,
k3 = 0x22,
k4 = 0x23,
k5 = 0x24,
k6 = 0x25,
k7 = 0x26,
k8 = 0x27,
k9 = 0x28,
k0 = 0x29,
kSpace = 0x30,
kEnter = 0x31,
kTab = 0x32,
kBackspace = 0x33,
kDelete = 0x34,
kMinus = 0x40,
kEquals = 0x41,
kLBrace = 0x42,
kRBrace = 0x43,
kBSlash = 0x44,
kFSlash = 0x45,
kSemicolon = 0x46,
kQuote = 0x47,
kComma = 0x48,
kPeriod = 0x49,
kBacktick = 0x4A,
kLShift = 0x50,
kRShift = 0x51,
kLCtrl = 0x52,
kRCtrl = 0x53,
kLAlt = 0x54,
kRAlt = 0x55,
kSuper = 0x56,
kEsc = 0x57,
kUp = 0x58,
kDown = 0x59,
kLeft = 0x5A,
kRight = 0x5B,
};
enum Action {
kUnknownAction,
kPressed,
kReleased,
};
class KeyboardListenerBase {
public:
KeyboardListenerBase();
KeyboardListenerBase(const KeyboardListenerBase&) = delete;
KeyboardListenerBase(KeyboardListenerBase&&) = delete;
void Register();
Thread Listen();
void ListenLoop();
// Override this to receive all raw keycodes. By default
// this function will try to translate each keycode into
// a printable character and call HandleCharacter.
virtual void HandleKeycode(Keycode code, Action action);
// This function is called by the default HandleKeycode
// implementation if you do not override it. If it receives
// input that corresponds to a printable character, it will pass that
// character here.
virtual void HandleCharacter(char c){};
private:
PortServer server_;
bool extended_on_ = false;
bool lshift_ = false;
bool rshift_ = false;
Keycode ScancodeToKeycode(uint8_t scancode);
Action ScancodeToAction(uint8_t scancode);
bool IsShift() { return lshift_ || rshift_; }
};
} // namespace mmth
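A sketch of the subclassing pattern this header implies; EchoListener is illustrative and not part of the diff:

class EchoListener : public mmth::KeyboardListenerBase {
 public:
  void HandleCharacter(char c) override {
    // e.g. append c to a terminal buffer
  }
};

// EchoListener listener;
// listener.Register();                 // registers the port with voyageurs
// mmth::Thread t = listener.Listen();  // runs ListenLoop on its own thread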

View file

@ -0,0 +1,60 @@
#include "ipc/channel.h"
#include <zcall.h>
#include "util/debug.h"
namespace mmth {
namespace {
uint64_t strlen(const char* ptr) {
uint64_t len = 0;
while (*ptr != '\0') {
len++;
ptr++;
}
return len;
}
} // namespace
void Channel::adopt_cap(uint64_t id) {
if (chan_cap_ != 0) {
crash("Adopting over channel.", glcr::ALREADY_EXISTS);
}
chan_cap_ = id;
}
z_cap_t Channel::release_cap() {
z_cap_t cap = chan_cap_;
chan_cap_ = 0;
return cap;
}
z_cap_t Channel::cap() { return chan_cap_; }
z_err_t Channel::WriteStr(const char* msg) {
if (!chan_cap_) {
return glcr::NULL_PTR;
}
return ZChannelSend(chan_cap_, strlen(msg), msg, 0, nullptr);
}
z_err_t Channel::ReadStr(char* buffer, uint64_t* size) {
if (!chan_cap_) {
return glcr::NULL_PTR;
}
uint64_t num_caps = 0;
return ZChannelRecv(chan_cap_, size, reinterpret_cast<uint8_t*>(buffer),
&num_caps, nullptr);
}
z_err_t CreateChannels(Channel& c1, Channel& c2) {
z_cap_t chan1, chan2;
RET_ERR(ZChannelCreate(&chan1, &chan2));
c1.adopt_cap(chan1);
c2.adopt_cap(chan2);
return glcr::OK;
}
} // namespace mmth

lib/mammoth/ipc/channel.h
View file

@ -0,0 +1,51 @@
#pragma once
#include <glacier/status/error.h>
#include <stdint.h>
#include <zcall.h>
namespace mmth {
class Channel {
public:
Channel() {}
void adopt_cap(uint64_t id);
z_cap_t release_cap();
z_cap_t cap();
z_err_t WriteStr(const char* msg);
z_err_t ReadStr(char* buffer, uint64_t* size);
template <typename T>
z_err_t WriteStruct(T*);
template <typename T>
z_err_t ReadStructAndCap(T*, uint64_t*);
// FIXME: Close channel here.
~Channel() {}
private:
z_cap_t chan_cap_ = 0;
};
uint64_t CreateChannels(Channel& c1, Channel& c2);
template <typename T>
z_err_t Channel::WriteStruct(T* obj) {
return ZChannelSend(chan_cap_, sizeof(T), obj, 0, nullptr);
}
template <typename T>
z_err_t Channel::ReadStructAndCap(T* obj, uint64_t* cap) {
uint64_t num_bytes = sizeof(T);
uint64_t num_caps = 1;
RET_ERR(ZChannelRecv(chan_cap_, &num_bytes, obj, &num_caps, cap));
if (num_caps != 1 || num_bytes != sizeof(T)) {
return glcr::FAILED_PRECONDITION;
}
return glcr::OK;
}
} // namespace mmth
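A minimal sketch of wiring the two ends together; in practice one capability is handed to another process rather than read back in the same scope:

mmth::Channel c1, c2;
mmth::CreateChannels(c1, c2);
c1.WriteStr("hello");
char buf[128];
uint64_t len = sizeof(buf);
c2.ReadStr(buf, &len);  // len is updated with the number of bytes received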

View file

@ -0,0 +1,9 @@
#include "ipc/endpoint_server.h"
namespace mmth {
glcr::UniquePtr<EndpointClient> EndpointClient::AdoptEndpoint(z_cap_t cap) {
return glcr::UniquePtr<EndpointClient>(new EndpointClient(cap));
}
} // namespace mmth

View file

@ -0,0 +1,69 @@
#pragma once
#include <glacier/container/pair.h>
#include <glacier/memory/unique_ptr.h>
#include <glacier/status/error_or.h>
#include <zcall.h>
#include <ztypes.h>
namespace mmth {
class EndpointClient {
public:
EndpointClient() = delete;
EndpointClient(const EndpointClient&) = delete;
EndpointClient& operator=(const EndpointClient&) = delete;
static glcr::UniquePtr<EndpointClient> AdoptEndpoint(z_cap_t cap);
template <typename Req, typename Resp>
glcr::ErrorOr<glcr::Pair<Resp, z_cap_t>> CallEndpointGetCap(const Req& req);
template <typename Req, typename Resp>
glcr::ErrorOr<Resp> CallEndpoint(const Req& req);
z_cap_t GetCap() const { return cap_; }
private:
EndpointClient(uint64_t cap) : cap_(cap) {}
z_cap_t cap_;
};
template <typename Req, typename Resp>
glcr::ErrorOr<glcr::Pair<Resp, z_cap_t>> EndpointClient::CallEndpointGetCap(
const Req& req) {
uint64_t reply_port_cap;
RET_ERR(ZEndpointSend(cap_, sizeof(Req), &req, 0, nullptr, &reply_port_cap));
Resp resp;
z_cap_t cap = 0;
uint64_t num_caps = 1;
uint64_t num_bytes = sizeof(Resp);
RET_ERR(ZReplyPortRecv(reply_port_cap, &num_bytes, &resp, &num_caps, &cap));
if (num_bytes != sizeof(resp) || num_caps != 1) {
return glcr::FAILED_PRECONDITION;
}
return glcr::Pair{resp, cap};
}
template <typename Req, typename Resp>
glcr::ErrorOr<Resp> EndpointClient::CallEndpoint(const Req& req) {
uint64_t reply_port_cap;
RET_ERR(ZEndpointSend(cap_, sizeof(Req), &req, 0, nullptr, &reply_port_cap));
Resp resp;
uint64_t num_bytes = sizeof(Resp);
uint64_t num_caps = 0;
RET_ERR(
ZReplyPortRecv(reply_port_cap, &num_bytes, &resp, &num_caps, nullptr));
if (num_bytes != sizeof(resp)) {
return glcr::FAILED_PRECONDITION;
}
return resp;
}
} // namespace mmth
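A sketch of the call pattern, with made-up request/response structs (callers send plain structs over the endpoint; the server side below reads the first uint64_t as a request id):

struct PingRequest { uint64_t request_id; };
struct PingResponse { uint64_t value; };

glcr::UniquePtr<mmth::EndpointClient> client =
    mmth::EndpointClient::AdoptEndpoint(endpoint_cap);  // capability assumed to be in scope
PingRequest req{.request_id = 1};
glcr::ErrorOr<PingResponse> resp_or =
    client->CallEndpoint<PingRequest, PingResponse>(req);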

View file

@ -0,0 +1,45 @@
#include "ipc/endpoint_server.h"
#include "util/debug.h"
namespace mmth {
// Declared as friend in EndpointServer.
void EndpointServerThreadBootstrap(void* endpoint_server) {
reinterpret_cast<EndpointServer*>(endpoint_server)->ServerThread();
}
glcr::ErrorOr<glcr::UniquePtr<EndpointClient>> EndpointServer::CreateClient() {
uint64_t client_cap;
RET_ERR(ZCapDuplicate(endpoint_cap_, ~(kZionPerm_Read), &client_cap));
return EndpointClient::AdoptEndpoint(client_cap);
}
Thread EndpointServer::RunServer() {
return Thread(EndpointServerThreadBootstrap, this);
}
void EndpointServer::ServerThread() {
while (true) {
uint64_t message_size = kBufferSize;
uint64_t reply_port_cap = 0;
uint64_t num_caps = 0;
glcr::ErrorCode err = static_cast<glcr::ErrorCode>(
ZEndpointRecv(endpoint_cap_, &message_size, recieve_buffer_, &num_caps,
nullptr, &reply_port_cap));
if (err != glcr::OK) {
dbgln("Error in receive: {x}", err);
continue;
}
RequestContext request(recieve_buffer_, message_size);
ResponseContext response(reply_port_cap);
// FIXME: Consider pumping these errors into the response as well.
check(HandleRequest(request, response));
if (!response.HasWritten()) {
dbgln("Returning without having written a response. Req type {x}",
request.request_id());
}
}
}
} // namespace mmth

View file

@ -0,0 +1,39 @@
#pragma once
#include <glacier/memory/unique_ptr.h>
#include <glacier/status/error_or.h>
#include <ztypes.h>
#include "mammoth/ipc/endpoint_client.h"
#include "mammoth/ipc/request_context.h"
#include "mammoth/ipc/response_context.h"
#include "mammoth/proc/thread.h"
namespace mmth {
class EndpointServer {
public:
EndpointServer() = delete;
EndpointServer(const EndpointServer&) = delete;
EndpointServer& operator=(const EndpointServer&) = delete;
glcr::ErrorOr<glcr::UniquePtr<EndpointClient>> CreateClient();
Thread RunServer();
virtual glcr::ErrorCode HandleRequest(RequestContext& request,
ResponseContext& response) = 0;
protected:
EndpointServer(z_cap_t cap) : endpoint_cap_(cap) {}
private:
z_cap_t endpoint_cap_;
static const uint64_t kBufferSize = 1024;
uint8_t recieve_buffer_[kBufferSize];
friend void EndpointServerThreadBootstrap(void* endpoint_server);
void ServerThread();
};
} // namespace mmth
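And the matching server-side sketch; EchoServer is illustrative, and writing to ResponseContext is omitted since that API is not shown in this diff:

class EchoServer : public mmth::EndpointServer {
 public:
  explicit EchoServer(z_cap_t cap) : EndpointServer(cap) {}
  glcr::ErrorCode HandleRequest(RequestContext& request,
                                ResponseContext& response) override {
    // Dispatch on request.request_id() and write a reply via response.
    return glcr::OK;
  }
};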

View file

@ -19,14 +19,4 @@ glcr::ErrorCode PortClient::WriteByte(uint8_t byte) {
return static_cast<glcr::ErrorCode>( return static_cast<glcr::ErrorCode>(
ZPortSend(port_cap_, 1, &byte, 0, nullptr)); ZPortSend(port_cap_, 1, &byte, 0, nullptr));
} }
glcr::ErrorCode PortClient::Write(uint16_t data) {
return static_cast<glcr::ErrorCode>(
ZPortSend(port_cap_, 2, &data, 0, nullptr));
}
glcr::ErrorCode PortClient::Write(uint64_t data) {
return static_cast<glcr::ErrorCode>(
ZPortSend(port_cap_, 8, &data, 0, nullptr));
}
} // namespace mmth } // namespace mmth

View file

@ -10,7 +10,6 @@ namespace mmth {
class PortClient { class PortClient {
public: public:
PortClient() {} PortClient() {}
PortClient(z_cap_t port_cap);
static PortClient AdoptPort(z_cap_t port_cap); static PortClient AdoptPort(z_cap_t port_cap);
template <typename T> template <typename T>
@ -19,8 +18,6 @@ class PortClient {
glcr::ErrorCode WriteString(glcr::String str, z_cap_t cap); glcr::ErrorCode WriteString(glcr::String str, z_cap_t cap);
glcr::ErrorCode WriteByte(uint8_t byte); glcr::ErrorCode WriteByte(uint8_t byte);
glcr::ErrorCode Write(uint16_t data);
glcr::ErrorCode Write(uint64_t data);
z_cap_t cap() { return port_cap_; } z_cap_t cap() { return port_cap_; }
@ -28,6 +25,8 @@ class PortClient {
private: private:
z_cap_t port_cap_ = 0; z_cap_t port_cap_ = 0;
PortClient(z_cap_t port_cap);
}; };
template <typename T> template <typename T>

View file

@ -55,12 +55,4 @@ glcr::ErrorOr<char> PortServer::RecvChar() {
return byte; return byte;
} }
glcr::ErrorOr<uint16_t> PortServer::RecvUint16() {
uint64_t bytes = 2;
uint64_t caps = 0;
uint16_t data;
RET_ERR(ZPortRecv(port_cap_, &bytes, &data, &caps, nullptr));
return data;
}
} // namespace mmth } // namespace mmth

View file

@ -19,7 +19,6 @@ class PortServer {
glcr::ErrorCode PollForIntCap(uint64_t* msg, uint64_t* cap); glcr::ErrorCode PollForIntCap(uint64_t* msg, uint64_t* cap);
glcr::ErrorOr<char> RecvChar(); glcr::ErrorOr<char> RecvChar();
glcr::ErrorOr<uint16_t> RecvUint16();
z_cap_t cap() { return port_cap_; } z_cap_t cap() { return port_cap_; }

View file

@ -0,0 +1,32 @@
#pragma once
#include <glacier/status/error.h>
#include <stdint.h>
class RequestContext {
public:
RequestContext(void* buffer, uint64_t buffer_length)
: buffer_(buffer), buffer_length_(buffer_length) {
if (buffer_length_ < sizeof(uint64_t)) {
request_id_ = -1;
} else {
request_id_ = *reinterpret_cast<uint64_t*>(buffer);
}
}
uint64_t request_id() { return request_id_; }
template <typename T>
glcr::ErrorCode As(T** arg) {
if (buffer_length_ < sizeof(T)) {
return glcr::INVALID_ARGUMENT;
}
*arg = reinterpret_cast<T*>(buffer_);
return glcr::OK;
}
private:
uint64_t request_id_;
void* buffer_;
uint64_t buffer_length_;
};

View file

@ -0,0 +1,147 @@
#include "proc/process.h"
#include <glacier/status/error.h>
#include <zcall.h>
#include "ipc/endpoint_server.h"
#include "ipc/port_client.h"
#include "ipc/port_server.h"
#include "util/debug.h"
#include "util/init.h"
#define MAM_PROC_DEBUG 0
namespace mmth {
namespace {
typedef struct {
char ident[16];
uint16_t type;
uint16_t machine;
uint32_t version;
uint64_t entry;
uint64_t phoff;
uint64_t shoff;
uint32_t flags;
uint16_t ehsize;
uint16_t phentsize;
uint16_t phnum;
uint16_t shentsize;
uint16_t shnum;
uint16_t shstrndx;
} Elf64Header;
typedef struct {
uint32_t name;
uint32_t type;
uint64_t flags;
uint64_t addr;
uint64_t offset;
uint64_t size;
uint32_t link;
uint32_t info;
uint64_t addralign;
uint64_t entsize;
} Elf64SectionHeader;
typedef struct {
uint32_t type;
uint32_t flags;
uint64_t offset;
uint64_t vaddr;
uint64_t paddr;
uint64_t filesz;
uint64_t memsz;
uint64_t align;
} Elf64ProgramHeader;
void memcpy(uint64_t base, uint64_t len, uint64_t dest) {
uint8_t* srcptr = reinterpret_cast<uint8_t*>(base);
uint8_t* destptr = reinterpret_cast<uint8_t*>(dest);
for (uint64_t i = 0; i < len; i++) {
destptr[i] = srcptr[i];
}
}
uint64_t LoadElfProgram(uint64_t base, uint64_t as_cap) {
Elf64Header* header = reinterpret_cast<Elf64Header*>(base);
Elf64ProgramHeader* programs =
reinterpret_cast<Elf64ProgramHeader*>(base + header->phoff);
for (uint64_t i = 0; i < header->phnum; i++) {
Elf64ProgramHeader& program = programs[i];
#if MAM_PROC_DEBUG
dbgln("Create mem object");
#endif
uint64_t page_offset = program.vaddr & 0xFFF;
uint64_t mem_cap;
uint64_t size = page_offset + program.memsz;
check(ZMemoryObjectCreate(size, &mem_cap));
#if MAM_PROC_DEBUG
dbgln("Map Local");
#endif
uint64_t vaddr;
check(ZAddressSpaceMap(gSelfVmasCap, 0, mem_cap, 0, &vaddr));
uint8_t* offset = reinterpret_cast<uint8_t*>(vaddr);
for (uint64_t j = 0; j < size; j++) {
offset[j] = 0;
}
#if MAM_PROC_DEBUG
dbgln("Copy");
#endif
memcpy(base + program.offset, program.filesz, vaddr + page_offset);
#if MAM_PROC_DEBUG
dbgln("Map Foreign");
#endif
check(ZAddressSpaceMap(as_cap, program.vaddr - page_offset, mem_cap, 0,
&vaddr));
}
return header->entry;
}
} // namespace
glcr::ErrorOr<z_cap_t> SpawnProcessFromElfRegion(uint64_t program,
z_cap_t yellowstone_client) {
uint64_t proc_cap;
uint64_t as_cap;
uint64_t foreign_port_id;
uint64_t port_cap;
#if MAM_PROC_DEBUG
dbgln("Port Create");
#endif
ASSIGN_OR_RETURN(PortServer server, PortServer::Create());
ASSIGN_OR_RETURN(PortClient pclient, server.CreateClient());
#if MAM_PROC_DEBUG
dbgln("Spawn");
#endif
RET_ERR(ZProcessSpawn(gSelfProcCap, server.cap(), &proc_cap, &as_cap,
&foreign_port_id));
uint64_t entry_point = LoadElfProgram(program, as_cap);
#if MAM_PROC_DEBUG
dbgln("Thread Create");
#endif
uint64_t thread_cap;
RET_ERR(ZThreadCreate(proc_cap, &thread_cap));
uint64_t dup_proc_cap;
RET_ERR(ZCapDuplicate(proc_cap, kZionPerm_All, &dup_proc_cap));
RET_ERR(pclient.WriteMessage<uint64_t>(Z_INIT_SELF_PROC, dup_proc_cap));
RET_ERR(pclient.WriteMessage<uint64_t>(Z_INIT_SELF_VMAS, as_cap));
RET_ERR(pclient.WriteMessage<uint64_t>(Z_INIT_ENDPOINT, yellowstone_client));
#if MAM_PROC_DEBUG
dbgln("Thread start");
#endif
RET_ERR(ZThreadStart(thread_cap, entry_point, foreign_port_id, 0));
return proc_cap;
}
} // namespace mmth

View file

@ -0,0 +1,12 @@
#pragma once
#include <glacier/status/error_or.h>
#include <stdint.h>
#include <ztypes.h>
namespace mmth {
glcr::ErrorOr<z_cap_t> SpawnProcessFromElfRegion(uint64_t program,
z_cap_t yellowstone_client);
} // namespace mmth
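A sketch of how this is meant to be used, assuming the ELF image is already mapped into the caller (mmth::File above is one way to get such a mapping); the path and capability names are illustrative:

mmth::File binary = mmth::File::Open("/bin/example");
glcr::ErrorOr<z_cap_t> proc_or = mmth::SpawnProcessFromElfRegion(
    reinterpret_cast<uint64_t>(binary.raw_ptr()), yellowstone_cap);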

View file

@ -0,0 +1,31 @@
#include "sync/mutex.h"
#include <zcall.h>
namespace mmth {
Mutex::Mutex(Mutex&& other) : mutex_cap_(other.mutex_cap_) {
other.mutex_cap_ = 0;
}
Mutex& Mutex::operator=(Mutex&& other) {
// TODO: Release existing mutex if it exists.
mutex_cap_ = other.mutex_cap_;
other.mutex_cap_ = 0;
return *this;
}
glcr::ErrorOr<Mutex> Mutex::Create() {
z_cap_t mutex_cap;
RET_ERR(ZMutexCreate(&mutex_cap));
return Mutex(mutex_cap);
}
glcr::ErrorCode Mutex::Lock() {
return static_cast<glcr::ErrorCode>(ZMutexLock(mutex_cap_));
}
glcr::ErrorCode Mutex::Release() {
return static_cast<glcr::ErrorCode>(ZMutexRelease(mutex_cap_));
}
} // namespace mmth

lib/mammoth/sync/mutex.h
View file

@ -0,0 +1,25 @@
#pragma once
#include <glacier/status/error_or.h>
#include <ztypes.h>
namespace mmth {
class Mutex {
public:
Mutex(const Mutex&) = delete;
Mutex(Mutex&&);
Mutex& operator=(Mutex&&);
static glcr::ErrorOr<Mutex> Create();
glcr::ErrorCode Lock();
glcr::ErrorCode Release();
private:
z_cap_t mutex_cap_;
Mutex(z_cap_t mutex_cap) : mutex_cap_(mutex_cap) {}
};
} // namespace mmth
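A sketch of the lock/unlock pairing; there is no RAII guard in this diff, so Release() has to be called on every path (this assumes glcr::ErrorOr::value() yields a movable reference):

auto mutex_or = mmth::Mutex::Create();
mmth::Mutex m = glcr::Move(mutex_or.value());
m.Lock();
// ... critical section ...
m.Release();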

View file

@ -71,8 +71,4 @@ z_cap_t OwnedMemoryRegion::DuplicateCap() {
return cap; return cap;
} }
OwnedMemoryRegion OwnedMemoryRegion::Duplicate() {
return OwnedMemoryRegion::FromCapability(DuplicateCap());
}
} // namespace mmth } // namespace mmth

View file

@ -25,15 +25,14 @@ class OwnedMemoryRegion {
static OwnedMemoryRegion ContiguousPhysical(uint64_t size, uint64_t* paddr); static OwnedMemoryRegion ContiguousPhysical(uint64_t size, uint64_t* paddr);
static OwnedMemoryRegion DirectPhysical(uint64_t paddr, uint64_t size); static OwnedMemoryRegion DirectPhysical(uint64_t paddr, uint64_t size);
-  uint64_t vaddr() const { return vaddr_; }
-  uint64_t size() const { return size_; }
-  z_cap_t cap() const { return vmmo_cap_; }
+  uint64_t vaddr() { return vaddr_; }
+  uint64_t size() { return size_; }
+  z_cap_t cap() { return vmmo_cap_; }
   z_cap_t DuplicateCap();
-  OwnedMemoryRegion Duplicate();
-  bool empty() const { return vmmo_cap_ == 0; }
-  explicit operator bool() const { return vmmo_cap_ != 0; }
+  bool empty() { return vmmo_cap_ != 0; }
+  explicit operator bool() { return vmmo_cap_ != 0; }
private: private:
OwnedMemoryRegion(uint64_t vmmo_cap, uint64_t vaddr, uint64_t size) OwnedMemoryRegion(uint64_t vmmo_cap, uint64_t vaddr, uint64_t size)

View file

@ -12,7 +12,7 @@ class PageAllocator {
public: public:
static uint64_t AllocatePagePair() { static uint64_t AllocatePagePair() {
uint64_t mem_cap; uint64_t mem_cap;
-    check(ZMemoryObjectCreate(0x4000, &mem_cap));
+    check(ZMemoryObjectCreate(0x2000, &mem_cap));
uint64_t vaddr; uint64_t vaddr;
check(ZAddressSpaceMap(gSelfVmasCap, 0, mem_cap, /* align= */ 0x2000, check(ZAddressSpaceMap(gSelfVmasCap, 0, mem_cap, /* align= */ 0x2000,
@ -60,8 +60,8 @@ class BuddyAllocator {
void* Allocate(uint64_t size) { void* Allocate(uint64_t size) {
check(ZMutexLock(mutex_cap_)); check(ZMutexLock(mutex_cap_));
-    if (size > (0x4000 - sizeof(BuddySlot))) {
-      crash("Can't allocate greater than four pages", glcr::UNIMPLEMENTED);
+    if (size > (0x2000 - sizeof(BuddySlot))) {
+      crash("Can't allocate greater than one page", glcr::UNIMPLEMENTED);
} }
if (free_front_ == nullptr) { if (free_front_ == nullptr) {
AddPage(); AddPage();

View file

@ -1,5 +1,4 @@
set(yunq_files set(yunq_files
message_view.cpp
serialize.cpp serialize.cpp
) )
@ -16,7 +15,3 @@ target_include_directories(yunq
set_target_properties(yunq PROPERTIES set_target_properties(yunq PROPERTIES
COMPILE_FLAGS "${CMAKE_CXX_FLAGS} ${BASE_COMPILE_FLAGS}") COMPILE_FLAGS "${CMAKE_CXX_FLAGS} ${BASE_COMPILE_FLAGS}")
if (enable_testing)
add_subdirectory(test)
endif()

View file

@ -1,72 +0,0 @@
#include "message_view.h"
namespace yunq {
namespace {
const uint64_t kIdentByte = 0x33441122;
} // namespace
glcr::Status MessageView::CheckHeader() const {
if (buffer_.At<uint32_t>(offset_ + 0) != kIdentByte) {
return glcr::InvalidArgument("Trying to parse an invalid yunq message.");
}
// TODO: Parse core size.
// TODO: Parse extension size.
// TODO: Check CRC32
// TODO: Parse options.
return glcr::Status::Ok();
}
uint32_t MessageView::MessageLength() const {
return buffer_.At<uint32_t>(offset_ + 8);
}
template <>
glcr::ErrorOr<uint64_t> MessageView::ReadField<uint64_t>(
uint64_t field_index) const {
return buffer_.At<uint64_t>(field_offset(field_index));
}
template <>
glcr::ErrorOr<int64_t> MessageView::ReadField<int64_t>(
uint64_t field_index) const {
return buffer_.At<int64_t>(field_offset(field_index));
}
template <>
glcr::ErrorOr<glcr::String> MessageView::ReadField<glcr::String>(
uint64_t field_index) const {
ExtensionPointer ptr =
buffer_.At<ExtensionPointer>(field_offset(field_index));
return buffer_.StringAt(offset_ + ptr.offset, ptr.length);
}
template <>
glcr::ErrorOr<glcr::Vector<uint64_t>> MessageView::ReadRepeated<uint64_t>(
uint64_t field_index) const {
ExtensionPointer pointer =
buffer_.At<ExtensionPointer>(field_offset(field_index));
glcr::Vector<uint64_t> v;
v.Resize(pointer.length / sizeof(uint64_t));
for (uint64_t i = offset_ + pointer.offset;
i < offset_ + pointer.offset + pointer.length; i += sizeof(uint64_t)) {
v.PushBack(buffer_.At<uint64_t>(i));
}
return v;
}
glcr::ErrorOr<uint64_t> MessageView::ReadCapability(
uint64_t field_index) const {
return buffer_.At<uint64_t>(field_offset(field_index));
}
glcr::ErrorOr<uint64_t> MessageView::ReadCapability(
uint64_t field_index, const glcr::CapBuffer& caps) const {
uint64_t offset = buffer_.At<uint64_t>(field_offset(field_index));
return caps.At(offset);
}
} // namespace yunq

View file

@ -1,91 +0,0 @@
#pragma once
#include <glacier/buffer/byte_buffer.h>
#include <glacier/buffer/cap_buffer.h>
#include <glacier/container/vector.h>
#include <glacier/status/error_or.h>
#include <glacier/status/status.h>
#include "yunq/yunq.h"
namespace yunq {
class MessageView {
public:
MessageView(const glcr::ByteBuffer& buffer, uint64_t offset)
: buffer_(buffer), offset_(offset) {}
[[nodiscard]] glcr::Status CheckHeader() const;
uint32_t MessageLength() const;
// TODO: Implement glcr::StatusOr
template <typename T>
glcr::ErrorOr<T> ReadField(uint64_t field_index) const;
template <typename T>
glcr::ErrorOr<glcr::Vector<T>> ReadRepeated(uint64_t field_index) const;
glcr::ErrorOr<uint64_t> ReadCapability(uint64_t field_index) const;
glcr::ErrorOr<uint64_t> ReadCapability(uint64_t field_index,
const glcr::CapBuffer& caps) const;
template <typename T>
glcr::Status ReadMessage(uint64_t field_index, T& message) const;
template <typename T>
glcr::Status ReadRepeatedMessage(uint64_t field_index,
glcr::Vector<T>& messages) const;
private:
const glcr::ByteBuffer& buffer_;
uint64_t offset_;
uint64_t field_offset(uint64_t field_index) const {
return offset_ + kHeaderSize + (8 * field_index);
}
};
template <>
glcr::ErrorOr<uint64_t> MessageView::ReadField<uint64_t>(
uint64_t field_index) const;
template <>
glcr::ErrorOr<int64_t> MessageView::ReadField<int64_t>(
uint64_t field_index) const;
template <>
glcr::ErrorOr<glcr::String> MessageView::ReadField<glcr::String>(
uint64_t field_index) const;
template <>
glcr::ErrorOr<glcr::Vector<uint64_t>> MessageView::ReadRepeated<uint64_t>(
uint64_t field_index) const;
template <typename T>
glcr::Status MessageView::ReadMessage(uint64_t field_index, T& message) const {
ExtensionPointer ptr =
buffer_.At<ExtensionPointer>(field_offset(field_index));
MessageView subview(buffer_, offset_ + ptr.offset);
return message.ParseFromBytes(subview);
}
template <typename T>
glcr::Status MessageView::ReadRepeatedMessage(uint64_t field_index,
glcr::Vector<T>& messages) const {
ExtensionPointer ptr =
buffer_.At<ExtensionPointer>(field_offset(field_index));
uint64_t ext_offset = ptr.offset;
while (ext_offset < ptr.offset + ptr.length) {
MessageView subview(buffer_, offset_ + ext_offset);
messages.EmplaceBack();
RETURN_ERROR(messages.PeekBack().ParseFromBytes(subview));
ext_offset += subview.MessageLength();
}
return glcr::Status::Ok();
}
} // namespace yunq

View file

@ -3,71 +3,27 @@
 namespace yunq {
 namespace {
-const uint32_t kIdentByte = 0x33441122;
+const uint64_t kIdentByte = 0x33441122;
 }  // namespace
-void Serializer::WriteHeader() {
-  buffer_.WriteAt<uint32_t>(offset_ + 0, kIdentByte);
-  buffer_.WriteAt<uint32_t>(offset_ + 4, core_size_);
-  buffer_.WriteAt<uint32_t>(offset_ + 8, next_extension_);
-  buffer_.WriteAt<uint32_t>(offset_ + 12, 0);  // TODO: Calculate CRC32.
-}
-template <>
-void Serializer::WriteField<uint64_t>(uint64_t field_index,
-                                      const uint64_t& value) {
-  buffer_.WriteAt<uint64_t>(field_offset(field_index), value);
-}
-template <>
-void Serializer::WriteField<int64_t>(uint64_t field_index,
-                                     const int64_t& value) {
-  buffer_.WriteAt<int64_t>(field_offset(field_index), value);
-}
-template <>
-void Serializer::WriteField<glcr::String>(uint64_t field_index,
-                                          const glcr::String& value) {
-  ExtensionPointer ptr{
-      .offset = (uint32_t)next_extension_,
-      // FIXME: Check downcast of str length.
-      .length = (uint32_t)value.length(),
-  };
-  buffer_.WriteStringAt(offset_ + next_extension_, value);
-  next_extension_ += ptr.length;
-  buffer_.WriteAt<ExtensionPointer>(field_offset(field_index), ptr);
-}
-template <>
-void Serializer::WriteRepeated<uint64_t>(uint64_t field_index,
-                                         const glcr::Vector<uint64_t>& value) {
-  ExtensionPointer ptr{
-      .offset = (uint32_t)next_extension_,
-      .length = (uint32_t)(value.size() * sizeof(uint64_t)),
-  };
-  next_extension_ += ptr.length;
-  buffer_.WriteAt<ExtensionPointer>(field_offset(field_index), ptr);
-  for (uint64_t i = 0; i < value.size(); i++) {
-    uint32_t ext_offset = offset_ + ptr.offset + (i * sizeof(uint64_t));
-    buffer_.WriteAt<uint64_t>(ext_offset, value.at(i));
-  }
-}
-void Serializer::WriteCapability(uint64_t field_index, uint64_t value) {
-  if (caps_) {
-    buffer_.WriteAt<uint64_t>(field_offset(field_index), next_cap_);
-    caps_.value().get().WriteAt(next_cap_++, value);
-  } else {
-    WriteField<uint64_t>(field_index, value);
-  }
-}
-void Serializer::WriteRepeatedCapability(uint64_t field_index,
-                                         const glcr::Vector<uint64_t>& value) {}
+glcr::Status CheckHeader(const glcr::ByteBuffer& buffer, uint64_t offset) {
+  if (buffer.At<uint32_t>(offset + 0) != kIdentByte) {
+    return glcr::InvalidArgument("Trying to parse an invalid yunq message.");
+  }
+  // TODO: Parse core size.
+  // TODO: Parse extension size.
+  // TODO: Check CRC32
+  // TODO: Parse options.
+  return glcr::Status::Ok();
+}
+void WriteHeader(glcr::ByteBuffer& bytes, uint64_t offset, uint32_t core_size,
+                 uint32_t extension_size) {
+  bytes.WriteAt<uint32_t>(offset + 0, kIdentByte);
+  bytes.WriteAt<uint32_t>(offset + 4, core_size);
+  bytes.WriteAt<uint32_t>(offset + 8, extension_size);
+  bytes.WriteAt<uint32_t>(offset + 12, 0);  // TODO: Calculate CRC32.
+}
 }  // namespace yunq

View file

@ -1,126 +1,14 @@
#pragma once #pragma once
#include <glacier/buffer/byte_buffer.h> #include <glacier/buffer/byte_buffer.h>
#include <glacier/buffer/cap_buffer.h>
#include <glacier/container/optional.h>
#include <glacier/container/vector.h>
#include <glacier/memory/reference.h>
#include <glacier/status/status.h> #include <glacier/status/status.h>
#include "yunq/yunq.h"
namespace yunq { namespace yunq {
-class Serializer {
- public:
+[[nodiscard]] glcr::Status CheckHeader(const glcr::ByteBuffer& buffer,
+                                       uint64_t offset);
Serializer(glcr::ByteBuffer& bytes, uint64_t offset, uint64_t num_fields)
: buffer_(bytes),
offset_(offset),
next_extension_(kHeaderSize + (8 * num_fields)),
core_size_(next_extension_),
caps_() {}
Serializer(glcr::ByteBuffer& bytes, uint64_t offset, uint64_t num_fields,
glcr::CapBuffer& caps)
: buffer_(bytes),
offset_(offset),
next_extension_(kHeaderSize + (8 * num_fields)),
core_size_(next_extension_),
caps_(caps) {}
template <typename T>
void WriteField(uint64_t field_index, const T& value);
-  template <typename T>
-  void WriteRepeated(uint64_t field_index, const glcr::Vector<T>& value);
+void WriteHeader(glcr::ByteBuffer& bytes, uint64_t offset, uint32_t core_size,
+                 uint32_t extension_size);
void WriteCapability(uint64_t field_index, uint64_t value);
void WriteRepeatedCapability(uint64_t field_index,
const glcr::Vector<uint64_t>& value);
template <typename T>
void WriteMessage(uint64_t field_index, const T& value);
template <typename T>
void WriteRepeatedMessage(uint64_t field_index, const glcr::Vector<T>& value);
void WriteHeader();
uint64_t size() const { return next_extension_; }
private:
glcr::ByteBuffer& buffer_;
uint64_t offset_;
uint64_t next_extension_;
uint64_t core_size_;
uint64_t next_cap_ = 0;
glcr::Optional<glcr::Ref<glcr::CapBuffer>> caps_;
uint64_t field_offset(uint64_t field_index) const {
return offset_ + kHeaderSize + (8 * field_index);
}
};
template <>
void Serializer::WriteField<uint64_t>(uint64_t field_index,
const uint64_t& value);
template <>
void Serializer::WriteField<int64_t>(uint64_t field_index,
const int64_t& value);
template <>
void Serializer::WriteField<glcr::String>(uint64_t field_index,
const glcr::String& value);
template <>
void Serializer::WriteRepeated<uint64_t>(uint64_t field_index,
const glcr::Vector<uint64_t>& value);
template <typename T>
void Serializer::WriteMessage(uint64_t field_index, const T& value) {
uint64_t length = 0;
if (caps_) {
length = value.SerializeToBytes(buffer_, offset_ + next_extension_,
caps_.value().get());
} else {
length = value.SerializeToBytes(buffer_, offset_ + next_extension_);
}
ExtensionPointer ptr{
.offset = (uint32_t)next_extension_,
.length = (uint32_t)length,
};
next_extension_ += length;
buffer_.WriteAt<ExtensionPointer>(field_offset(field_index), ptr);
}
template <typename T>
void Serializer::WriteRepeatedMessage(uint64_t field_index,
const glcr::Vector<T>& value) {
uint64_t next_offset = next_extension_;
uint64_t length = value.size();
for (T& message : value) {
uint64_t msg_length = 0;
if (caps_) {
msg_length = message.SerializeToBytes(buffer_, offset_ + next_offset,
caps_.value().get());
} else {
msg_length = message.SerializeToBytes(buffer_, offset_ + next_offset);
}
next_offset += msg_length;
}
ExtensionPointer ptr{
.offset = (uint32_t)next_extension_,
.length = (uint32_t)length,
};
next_extension_ = next_offset;
buffer_.WriteAt<ExtensionPointer>(field_offset(field_index), ptr);
}
} // namespace yunq } // namespace yunq
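For reference, the wire layout implied by the serialization code on both sides of this diff (the 24-byte header size comes from the generated example below; treat this as a reading aid, not a spec):

// bytes [0, 4)    ident 0x33441122
// bytes [4, 8)    core size (header plus an 8-byte slot per field)
// bytes [8, 12)   extension / total length
// bytes [12, 16)  CRC32 placeholder (currently written as 0)
// bytes [24 + 8*i, 32 + 8*i)  slot for field i: an inline u64/i64, or an
//                 ExtensionPointer{offset, length} into the trailing bytes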

View file

@ -1,44 +0,0 @@
add_executable(yunq_test yunq_test.cpp)
add_dependencies(yunq_test
example_yunq)
target_link_libraries(yunq_test
Catch2::Catch2WithMain
example_yunq)
target_include_directories(yunq_test PRIVATE "." "../../../zion/include")
add_test(NAME yunq_test COMMAND $<TARGET_FILE:yunq_test>)
add_dependencies(build_test
yunq_test)
# Build the yunq manually rather than using the generator
# because we don't want to link against mammoth and overwrite new.
set(target example_yunq)
add_library(example_yunq
${CMAKE_CURRENT_SOURCE_DIR}/example/example.yunq.cpp
)
target_include_directories(example_yunq
PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/example"
)
target_link_libraries(${target}
glacier
yunq
zion_stub
)
set(PYTHON "${CMAKE_SOURCE_DIR}/yunq/venv/bin/python")
set(YUNQ "${CMAKE_SOURCE_DIR}/yunq/yunq.py")
add_custom_command(
OUTPUT
${CMAKE_CURRENT_SOURCE_DIR}/example/example.yunq.cpp
${CMAKE_CURRENT_SOURCE_DIR}/example/example.yunq.h
COMMAND ${PYTHON} ${YUNQ} ${CMAKE_CURRENT_SOURCE_DIR}/example/example.yunq
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/example/example.yunq
)

View file

@ -1,27 +0,0 @@
package ex;
message Basic {
u64 unsigned_int;
i64 signed_int;
string strn;
}
message Cap {
capability cap;
}
message Repeated {
repeated u64 unsigned_ints;
}
message Nested {
Basic basic;
Cap cap1;
Cap cap2;
}
message RepeatedNested {
repeated Basic basics;
repeated Cap caps;
}

View file

@ -1,178 +0,0 @@
// Generated file -- DO NOT MODIFY.
#include "example.yunq.h"
#include <yunq/message_view.h>
#include <yunq/serialize.h>
namespace ex {
namespace {
const uint64_t header_size = 24; // 4x uint32, 1x uint64
struct ExtPointer {
uint32_t offset;
uint32_t length;
};
} // namespace
glcr::Status Basic::ParseFromBytes(const yunq::MessageView& message) {
RETURN_ERROR(ParseFromBytesInternal(message));
return glcr::Status::Ok();
}
glcr::Status Basic::ParseFromBytes(const yunq::MessageView& message, const glcr::CapBuffer& caps) {
RETURN_ERROR(ParseFromBytesInternal(message));
return glcr::Status::Ok();
}
glcr::Status Basic::ParseFromBytesInternal(const yunq::MessageView& message) {
RETURN_ERROR(message.CheckHeader());
// Parse field.
ASSIGN_OR_RETURN(field_, message.ReadField<uint64_t>(0));
return glcr::Status::Ok();
}
uint64_t Basic::SerializeToBytes(glcr::ByteBuffer& bytes, uint64_t offset) const {
yunq::Serializer serializer(bytes, offset, 1);
return SerializeInternal(serializer);
}
uint64_t Basic::SerializeToBytes(glcr::ByteBuffer& bytes, uint64_t offset, glcr::CapBuffer& caps) const {
yunq::Serializer serializer(bytes, offset, 1, caps);
return SerializeInternal(serializer);
}
uint64_t Basic::SerializeInternal(yunq::Serializer& serializer) const {
// Write field.
serializer.WriteField<uint64_t>(0, field_);
serializer.WriteHeader();
return serializer.size();
}
glcr::Status Types::ParseFromBytes(const yunq::MessageView& message) {
RETURN_ERROR(ParseFromBytesInternal(message));
return glcr::Status::Ok();
}
glcr::Status Types::ParseFromBytes(const yunq::MessageView& message, const glcr::CapBuffer& caps) {
RETURN_ERROR(ParseFromBytesInternal(message));
return glcr::Status::Ok();
}
glcr::Status Types::ParseFromBytesInternal(const yunq::MessageView& message) {
RETURN_ERROR(message.CheckHeader());
// Parse unsigned_int.
ASSIGN_OR_RETURN(unsigned_int_, message.ReadField<uint64_t>(0));
// Parse signed_int.
ASSIGN_OR_RETURN(signed_int_, message.ReadField<int64_t>(1));
// Parse str.
ASSIGN_OR_RETURN(str_, message.ReadField<glcr::String>(2));
return glcr::Status::Ok();
}
uint64_t Types::SerializeToBytes(glcr::ByteBuffer& bytes, uint64_t offset) const {
yunq::Serializer serializer(bytes, offset, 3);
return SerializeInternal(serializer);
}
uint64_t Types::SerializeToBytes(glcr::ByteBuffer& bytes, uint64_t offset, glcr::CapBuffer& caps) const {
yunq::Serializer serializer(bytes, offset, 3, caps);
return SerializeInternal(serializer);
}
uint64_t Types::SerializeInternal(yunq::Serializer& serializer) const {
// Write unsigned_int.
serializer.WriteField<uint64_t>(0, unsigned_int_);
// Write signed_int.
serializer.WriteField<int64_t>(1, signed_int_);
// Write str.
serializer.WriteField<glcr::String>(2, str_);
serializer.WriteHeader();
return serializer.size();
}
glcr::Status Cap::ParseFromBytes(const yunq::MessageView& message) {
RETURN_ERROR(ParseFromBytesInternal(message));
// Parse cap.
ASSIGN_OR_RETURN(cap_, message.ReadCapability(0));
return glcr::Status::Ok();
}
glcr::Status Cap::ParseFromBytes(const yunq::MessageView& message, const glcr::CapBuffer& caps) {
RETURN_ERROR(ParseFromBytesInternal(message));
// Parse cap.
ASSIGN_OR_RETURN(cap_, message.ReadCapability(0, caps));
return glcr::Status::Ok();
}
glcr::Status Cap::ParseFromBytesInternal(const yunq::MessageView& message) {
RETURN_ERROR(message.CheckHeader());
// Parse cap.
return glcr::Status::Ok();
}
uint64_t Cap::SerializeToBytes(glcr::ByteBuffer& bytes, uint64_t offset) const {
yunq::Serializer serializer(bytes, offset, 1);
return SerializeInternal(serializer);
}
uint64_t Cap::SerializeToBytes(glcr::ByteBuffer& bytes, uint64_t offset, glcr::CapBuffer& caps) const {
yunq::Serializer serializer(bytes, offset, 1, caps);
return SerializeInternal(serializer);
}
uint64_t Cap::SerializeInternal(yunq::Serializer& serializer) const {
// Write cap.
serializer.WriteCapability(0, cap_);
serializer.WriteHeader();
return serializer.size();
}
glcr::Status Repeated::ParseFromBytes(const yunq::MessageView& message) {
RETURN_ERROR(ParseFromBytesInternal(message));
return glcr::Status::Ok();
}
glcr::Status Repeated::ParseFromBytes(const yunq::MessageView& message, const glcr::CapBuffer& caps) {
RETURN_ERROR(ParseFromBytesInternal(message));
return glcr::Status::Ok();
}
glcr::Status Repeated::ParseFromBytesInternal(const yunq::MessageView& message) {
RETURN_ERROR(message.CheckHeader());
// Parse unsigned_ints.
ASSIGN_OR_RETURN(unsigned_ints_, message.ReadRepeated<uint64_t>(0));
return glcr::Status::Ok();
}
uint64_t Repeated::SerializeToBytes(glcr::ByteBuffer& bytes, uint64_t offset) const {
yunq::Serializer serializer(bytes, offset, 1);
return SerializeInternal(serializer);
}
uint64_t Repeated::SerializeToBytes(glcr::ByteBuffer& bytes, uint64_t offset, glcr::CapBuffer& caps) const {
yunq::Serializer serializer(bytes, offset, 1, caps);
return SerializeInternal(serializer);
}
uint64_t Repeated::SerializeInternal(yunq::Serializer& serializer) const {
// Write unsigned_ints.
serializer.WriteRepeated<uint64_t>(0, unsigned_ints_);
serializer.WriteHeader();
return serializer.size();
}
} // namespace ex

View file

@ -1,129 +0,0 @@
// Generated file - DO NOT MODIFY
#pragma once
#include <glacier/buffer/byte_buffer.h>
#include <glacier/buffer/cap_buffer.h>
#include <glacier/status/status.h>
#include <glacier/container/vector.h>
#include <glacier/string/string.h>
#include <yunq/message_view.h>
#include <yunq/serialize.h>
#include <ztypes.h>
namespace ex {
class Basic {
public:
Basic() {}
// Delete copy until implemented; move is defaulted.
Basic(const Basic&) = delete;
Basic(Basic&&) = default;
Basic& operator=(Basic&&) = default;
[[nodiscard]] glcr::Status ParseFromBytes(const yunq::MessageView& message);
[[nodiscard]] glcr::Status ParseFromBytes(const yunq::MessageView& message, const glcr::CapBuffer&);
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const;
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const;
const uint64_t& field() const { return field_; }
uint64_t& mutable_field() { return field_; }
void set_field(const uint64_t& value) { field_ = value; }
private:
uint64_t field_;
// Parses everything except for caps.
glcr::Status ParseFromBytesInternal(const yunq::MessageView& message);
uint64_t SerializeInternal(yunq::Serializer& serializer) const;
};
class Types {
public:
Types() {}
// Delete copy until implemented; move is defaulted.
Types(const Types&) = delete;
Types(Types&&) = default;
Types& operator=(Types&&) = default;
[[nodiscard]] glcr::Status ParseFromBytes(const yunq::MessageView& message);
[[nodiscard]] glcr::Status ParseFromBytes(const yunq::MessageView& message, const glcr::CapBuffer&);
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const;
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const;
const uint64_t& unsigned_int() const { return unsigned_int_; }
uint64_t& mutable_unsigned_int() { return unsigned_int_; }
void set_unsigned_int(const uint64_t& value) { unsigned_int_ = value; }
const int64_t& signed_int() const { return signed_int_; }
int64_t& mutable_signed_int() { return signed_int_; }
void set_signed_int(const int64_t& value) { signed_int_ = value; }
const glcr::String& str() const { return str_; }
glcr::String& mutable_str() { return str_; }
void set_str(const glcr::String& value) { str_ = value; }
private:
uint64_t unsigned_int_;
int64_t signed_int_;
glcr::String str_;
// Parses everything except for caps.
glcr::Status ParseFromBytesInternal(const yunq::MessageView& message);
uint64_t SerializeInternal(yunq::Serializer& serializer) const;
};
class Cap {
public:
Cap() {}
// Delete copy until implemented; move is defaulted.
Cap(const Cap&) = delete;
Cap(Cap&&) = default;
Cap& operator=(Cap&&) = default;
[[nodiscard]] glcr::Status ParseFromBytes(const yunq::MessageView& message);
[[nodiscard]] glcr::Status ParseFromBytes(const yunq::MessageView& message, const glcr::CapBuffer&);
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const;
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const;
const z_cap_t& cap() const { return cap_; }
z_cap_t& mutable_cap() { return cap_; }
void set_cap(const z_cap_t& value) { cap_ = value; }
private:
z_cap_t cap_;
// Parses everything except for caps.
glcr::Status ParseFromBytesInternal(const yunq::MessageView& message);
uint64_t SerializeInternal(yunq::Serializer& serializer) const;
};
class Repeated {
public:
Repeated() {}
// Delete copy and move until implemented.
Repeated(const Repeated&) = delete;
Repeated(Repeated&&) = default;
Repeated& operator=(Repeated&&) = default;
[[nodiscard]] glcr::Status ParseFromBytes(const yunq::MessageView& message);
[[nodiscard]] glcr::Status ParseFromBytes(const yunq::MessageView& message, const glcr::CapBuffer&);
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset) const;
uint64_t SerializeToBytes(glcr::ByteBuffer&, uint64_t offset, glcr::CapBuffer&) const;
const glcr::Vector<uint64_t>& unsigned_ints() const { return unsigned_ints_; }
glcr::Vector<uint64_t>& mutable_unsigned_ints() { return unsigned_ints_; }
void add_unsigned_ints(const uint64_t& value) { unsigned_ints_.PushBack(value); }
void add_unsigned_ints(uint64_t&& value) { unsigned_ints_.PushBack(glcr::Move(value)); }
private:
glcr::Vector<uint64_t> unsigned_ints_;
// Parses everything except for caps.
glcr::Status ParseFromBytesInternal(const yunq::MessageView& message);
uint64_t SerializeInternal(yunq::Serializer& serializer) const;
};
} // namespace ex

View file

@ -1,100 +0,0 @@
#include <catch2/catch_test_macros.hpp>
#include "example/example.yunq.h"
TEST_CASE("Basic Setter/Getter", "[yunq]") {
ex::Basic b;
b.set_field(1);
REQUIRE(b.field() == 1);
}
TEST_CASE("Basic serialization", "[yunq]") {
ex::Basic a;
a.set_field(1);
glcr::ByteBuffer buf(1024);
a.SerializeToBytes(buf, 0);
ex::Basic b;
yunq::MessageView v(buf, 0);
REQUIRE(b.ParseFromBytes(v).ok() == true);
REQUIRE(b.field() == 1);
}
TEST_CASE("Types Setter/Getter", "[yunq]") {
ex::Types t;
t.set_unsigned_int(1);
t.set_signed_int(-1);
t.set_str("test");
REQUIRE(t.unsigned_int() == 1);
REQUIRE(t.signed_int() == -1);
REQUIRE(t.str() == "test");
}
TEST_CASE("Types Serialization", "[yunq]") {
ex::Types a;
a.set_unsigned_int(1);
a.set_signed_int(-1);
a.set_str("test");
glcr::ByteBuffer buf(1024);
a.SerializeToBytes(buf, 0);
ex::Types b;
yunq::MessageView v(buf, 0);
REQUIRE(b.ParseFromBytes(v).ok() == true);
REQUIRE(b.unsigned_int() == 1);
REQUIRE(b.signed_int() == -1);
REQUIRE(b.str() == "test");
}
TEST_CASE("Cap Setter/Getter", "[yunq]") {
ex::Cap c;
c.set_cap(1234);
REQUIRE(c.cap() == 1234);
}
TEST_CASE("Cap Serialization Inline", "[yunq]") {
ex::Cap a;
a.set_cap(1234);
glcr::ByteBuffer buf(1024);
a.SerializeToBytes(buf, 0);
ex::Cap b;
yunq::MessageView v(buf, 0);
REQUIRE(b.ParseFromBytes(v).ok() == true);
REQUIRE(b.cap() == 1234);
}
TEST_CASE("Cap Serialization Sidebuffer", "[yunq]") {
ex::Cap a;
a.set_cap(1234);
glcr::ByteBuffer buf(1024);
glcr::CapBuffer caps(1);
a.SerializeToBytes(buf, 0, caps);
ex::Cap b;
yunq::MessageView v(buf, 0);
REQUIRE(b.ParseFromBytes(v, caps).ok() == true);
REQUIRE(b.cap() == 1234);
}
TEST_CASE("Repeated Setter/Getter", "[yunq]") {
ex::Repeated r;
r.mutable_unsigned_ints().PushBack(1);
r.add_unsigned_ints(2);
uint64_t c = 3;
r.add_unsigned_ints(glcr::Move(c));
REQUIRE(r.unsigned_ints()[0] == 1);
REQUIRE(r.unsigned_ints()[1] == 2);
REQUIRE(r.unsigned_ints()[2] == 3);
}

View file

@ -1,22 +0,0 @@
#pragma once
#include <stdint.h>
namespace yunq {
struct MessageHeader {
uint32_t ident;
uint32_t core_length;
uint32_t length;
uint32_t crc32;
uint64_t options;
} __attribute__((packed));
const uint64_t kHeaderSize = 24; // 4x uint32, 1x uint64
struct ExtensionPointer {
uint32_t offset;
uint32_t length;
};
} // namespace yunq

View file

@ -1,3 +0,0 @@
[toolchain]
channel = "nightly-2025-10-02"
components = ["rustfmt", "rust-analyzer", "clippy", "rust-src"]

View file

@ -1,10 +0,0 @@
[unstable]
build-std-features = ["compiler-builtins-mem"]
build-std = ["core", "compiler_builtins", "alloc"]
[build]
target = "x86_64-acadia-os.json"
[alias]
test_pc = "test --target=x86_64-unknown-linux-gnu -Z build-std=std --lib"

248
rust/Cargo.lock generated
View file

@ -1,248 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4
[[package]]
name = "autocfg"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
[[package]]
name = "bitfield-struct"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de05f8756f1c68937349406d4632ae96ae35901019b5e59c508d9c38c64715fb"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "convert_case"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca"
dependencies = [
"unicode-segmentation",
]
[[package]]
name = "denali"
version = "0.1.0"
dependencies = [
"bitfield-struct",
"mammoth",
"pci",
"yellowstone-yunq",
"yunq",
"yunqc",
]
[[package]]
name = "denali_client"
version = "0.1.0"
dependencies = [
"mammoth",
"yunq",
"yunqc",
]
[[package]]
name = "ext2"
version = "0.1.0"
dependencies = [
"denali_client",
"mammoth",
"yellowstone-yunq",
]
[[package]]
name = "linked_list_allocator"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9afa463f5405ee81cdb9cc2baf37e08ec7e4c8209442b5d72c04cfb2cd6e6286"
dependencies = [
"spinning_top",
]
[[package]]
name = "lock_api"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "mammoth"
version = "0.1.0"
dependencies = [
"linked_list_allocator",
]
[[package]]
name = "pci"
version = "0.1.0"
dependencies = [
"bitfield-struct",
"mammoth",
]
[[package]]
name = "prettyplease"
version = "0.2.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e"
dependencies = [
"proc-macro2",
"syn",
]
[[package]]
name = "proc-macro2"
version = "1.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7"
dependencies = [
"proc-macro2",
]
[[package]]
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "spinning_top"
version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b9eb1a2f4c41445a3a0ff9abc5221c5fcd28e1f13cd7c0397706f9ac938ddb0"
dependencies = [
"lock_api",
]
[[package]]
name = "syn"
version = "2.0.72"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "testbed"
version = "0.1.0"
dependencies = [
"mammoth",
"yellowstone-yunq",
"yunq",
]
[[package]]
name = "teton"
version = "0.1.0"
dependencies = [
"mammoth",
"victoriafalls",
"voyageurs",
"yellowstone-yunq",
]
[[package]]
name = "unicode-ident"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]]
name = "unicode-segmentation"
version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202"
[[package]]
name = "victoriafalls"
version = "0.1.0"
dependencies = [
"denali_client",
"ext2",
"mammoth",
"yellowstone-yunq",
"yunq",
"yunqc",
]
[[package]]
name = "voyageurs"
version = "0.1.0"
dependencies = [
"mammoth",
"yellowstone-yunq",
"yunq",
"yunqc",
]
[[package]]
name = "yellowstone"
version = "0.1.0"
dependencies = [
"denali_client",
"mammoth",
"victoriafalls",
"voyageurs",
"yellowstone-yunq",
"yunq",
]
[[package]]
name = "yellowstone-yunq"
version = "0.1.0"
dependencies = [
"mammoth",
"yunq",
"yunqc",
]
[[package]]
name = "yunq"
version = "0.1.0"
dependencies = [
"mammoth",
]
[[package]]
name = "yunq-test"
version = "0.1.0"
dependencies = [
"mammoth",
"yunq",
"yunqc",
]
[[package]]
name = "yunqc"
version = "0.1.0"
dependencies = [
"convert_case",
"prettyplease",
"proc-macro2",
"quote",
"syn",
]

View file

@ -1,16 +0,0 @@
[workspace]
members = [
"lib/client/denali_client", "lib/fs/ext2",
"lib/mammoth", "lib/pci",
"lib/voyageurs",
"lib/yellowstone",
"lib/yunq",
"lib/yunq-test",
"sys/denali",
"sys/teton",
"sys/victoriafalls",
"sys/yellowstone",
"usr/testbed",
]
resolver = "2"

View file

@ -1,11 +0,0 @@
[package]
name = "denali_client"
version = "0.1.0"
edition = "2024"
[dependencies]
mammoth = { path = "../../mammoth" }
yunq = { path = "../../yunq" }
[build-dependencies]
yunqc = { path = "../../../../yunq/rust" }

View file

@ -1,14 +0,0 @@
use std::fs;
fn main() {
let input_file = "../../../sys/denali/denali.yunq";
println!("cargo::rerun-if-changed={input_file}");
let input = fs::read_to_string(input_file).expect("Failed to read input file");
let code = yunqc::codegen(&input).expect("Failed to generate yunq code.");
let out = std::env::var("OUT_DIR").unwrap() + "/yunq.rs";
fs::write(out, code).expect("Failed to write generated code.");
}

View file

@ -1,50 +0,0 @@
use mammoth::{cap::Capability, zion::ZError};
use crate::{DenaliClient, DiskBlock, ReadManyRequest, ReadRequest};
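/// Thin wrapper around a `DenaliClient` for a single disk/partition.
///
/// A sketch of the field semantics, inferred from how `Ext2Driver` constructs
/// this type: `lba_offset` is the partition's starting sector on the disk, and
/// `block_multiplier` is the number of disk sectors per filesystem block, so
/// filesystem-relative block addresses can be translated into absolute LBAs.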
pub struct DiskReader {
client: DenaliClient,
disk_id: u64,
lba_offset: u64,
block_multiplier: u64,
}
impl DiskReader {
pub fn new(client: DenaliClient, disk_id: u64, lba_offset: u64, block_multiplier: u64) -> Self {
Self {
client,
disk_id,
lba_offset,
block_multiplier,
}
}
// TODO: Make yunq clients callable from a non-mutable reference so this can be called from
// shared ownership.
pub fn read(&mut self, lba: u64, cnt: u64) -> Result<Capability, ZError> {
let read_resp = self.client.read(&ReadRequest {
device_id: self.disk_id,
block: DiskBlock {
lba: self.lba_offset + (lba * self.block_multiplier),
size: cnt * self.block_multiplier,
},
})?;
Ok(Capability::take(read_resp.memory))
}
pub fn read_many(&mut self, blocks: &[DiskBlock]) -> Result<Capability, ZError> {
let read_resp = self.client.read_many(&ReadManyRequest {
device_id: self.disk_id,
blocks: blocks
.iter()
.map(|b| DiskBlock {
lba: self.lba_offset + (b.lba * self.block_multiplier),
size: b.size * self.block_multiplier,
})
.collect(),
})?;
Ok(Capability::take(read_resp.memory))
}
}

View file

@ -1,9 +0,0 @@
#![no_std]
use core::include;
include!(concat!(env!("OUT_DIR"), "/yunq.rs"));
mod disk_reader;
pub use disk_reader::DiskReader;

View file

@ -1,9 +0,0 @@
[package]
name = "ext2"
version = "0.1.0"
edition = "2024"
[dependencies]
denali_client = { path = "../../client/denali_client" }
mammoth = { path = "../../mammoth" }
yellowstone-yunq = { path = "../../yellowstone/" }

View file

@ -1,282 +0,0 @@
use core::cmp::min;
use alloc::{collections::BTreeMap, string::String, vec::Vec};
use denali_client::{DenaliClient, DiskBlock, DiskReader, ReadRequest};
use mammoth::{cap::Capability, debug, mem::MemoryRegion, zion::ZError};
use yellowstone_yunq::DenaliInfo;
use crate::types::{BlockGroupDescriptor, DirEntry, Inode, Superblock};
pub struct FileInfo {
pub inode: u32,
pub name: String,
}
/// Ext2 Driver with the ability to read files and directories from the given disk.
///
/// Implementation based on the information available at
/// https://www.nongnu.org/ext2-doc/ext2.html
pub struct Ext2Driver {
reader: DiskReader,
superblock_region: MemoryRegion,
bgdt_region: MemoryRegion,
/// Cache of the memory regions for the inode tables read so far, indexed by
/// block group number.
inode_table_map: Vec<Option<MemoryRegion>>,
/// Cache of inode_num to memory capability.
/// This is particularly important for directories so we
/// don't iterate over the disk each time.
inode_cache: BTreeMap<u32, Capability>,
}
impl Ext2Driver {
pub fn new(denali_info: DenaliInfo) -> Self {
let mut client = DenaliClient::new(Capability::take(denali_info.denali_endpoint));
// Calculate the absolute offset and size of the superblock. It is located at
// offset 1024 of the partition and is 1024 bytes long. (Mostly extra
// reserved space).
// Ref: https://www.nongnu.org/ext2-doc/ext2.html#def-superblock
let abs_superblock_start = denali_info.lba_offset + 2;
let abs_superblock_size = 2; // TODO: This assumes 512-byte sectors.
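// With 512-byte sectors (the assumption noted above), byte offset 1024 is
// LBA 2 relative to the partition start and the 1024-byte superblock spans
// 2 sectors, which is where the `+ 2` and the size of 2 come from.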
let superblock_region = MemoryRegion::from_cap(Capability::take(
client
.read(&ReadRequest {
device_id: denali_info.device_id,
block: DiskBlock {
lba: abs_superblock_start,
size: abs_superblock_size,
},
})
.unwrap()
.memory,
))
.unwrap();
let superblock: &Superblock = superblock_region.as_ref();
assert!(superblock.is_valid());
let mut reader = DiskReader::new(
client,
denali_info.device_id,
denali_info.lba_offset,
superblock.sectors_per_block(),
);
let bgdt_region = MemoryRegion::from_cap(
reader
.read(superblock.bgdt_block_num(), superblock.bgdt_block_size())
.unwrap(),
)
.unwrap();
let mut inode_table_map = Vec::new();
inode_table_map.resize_with(superblock.num_block_groups() as usize, || None);
Self {
reader,
superblock_region,
bgdt_region,
inode_table_map,
inode_cache: BTreeMap::new(),
}
}
fn superblock(&self) -> &Superblock {
self.superblock_region.as_ref()
}
fn bgdt(&self) -> &[BlockGroupDescriptor] {
self.bgdt_region.slice()
}
/// Updates the cached inode tables to contain the inode table for
/// a specific group.
fn populate_inode_table_if_none(&mut self, block_group_num: usize) {
if self.inode_table_map[block_group_num].is_none() {
debug!(
"Cache MISS on inode table for block_group {}",
block_group_num
);
let inode_table = self.bgdt()[block_group_num].inode_table;
self.inode_table_map[block_group_num] = Some(
MemoryRegion::from_cap(
self.reader
.read(
inode_table as u64,
self.superblock().inode_table_block_size(),
)
.unwrap(),
)
.unwrap(),
);
} else {
debug!(
"Cache HIT on inode table for block_group {}",
block_group_num
);
}
}
pub fn get_inode(&mut self, inode_num: u32) -> Inode {
// See the following for a description of finding an inode.
// https://www.nongnu.org/ext2-doc/ext2.html#idm140660447281728
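// Worked example with hypothetical numbers: if inodes_per_group is 8192,
// inode 12 lives in block group (12 - 1) / 8192 = 0 at local index
// (12 - 1) % 8192 = 11, i.e. at byte offset 11 * inode_size into that
// group's inode table.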
let block_group_num = (inode_num - 1) / self.superblock().inodes_per_group;
self.populate_inode_table_if_none(block_group_num as usize);
let region = self.inode_table_map[block_group_num as usize]
.as_ref()
.unwrap();
let local_index = (inode_num - 1) % self.superblock().inodes_per_group;
let offset = self.superblock().inode_size() * local_index as u64;
unsafe { region.raw_ptr_at_offset::<Inode>(offset).read().clone() }
}
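/// Reads a singly indirect block and returns the first `num_blocks` block
/// numbers it contains. The 256-entry limit appears to assume 1024-byte
/// blocks (1024 / 4 bytes per u32 block number); larger block sizes would
/// hold more entries per indirect block.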
fn get_blocks_from_single_indirect(&mut self, block_num: u64, num_blocks: usize) -> Vec<u32> {
assert!(num_blocks <= 256);
let single_indr_block_mem =
MemoryRegion::from_cap(self.reader.read(block_num, 1).unwrap()).unwrap();
single_indr_block_mem.slice()[..num_blocks].to_vec()
}
fn get_blocks_from_double_indirect(&mut self, block_num: u64, num_blocks: usize) -> Vec<u32> {
assert!(num_blocks > 0 && num_blocks <= (256 * 256));
let num_dbl_indr = ((num_blocks - 1) / 256) + 1;
let dbl_indr_block_mem =
MemoryRegion::from_cap(self.reader.read(block_num, 1).unwrap()).unwrap();
let dbl_indr_blocks: &[u32] = &dbl_indr_block_mem.slice()[0..num_dbl_indr];
let mut blocks_to_read = Vec::new();
for (i, dbl_indr_block) in dbl_indr_blocks.iter().enumerate() {
let num_blocks_in_single = min(num_blocks - (256 * i), 256);
blocks_to_read.append(
&mut self
.get_blocks_from_single_indirect(*dbl_indr_block as u64, num_blocks_in_single),
);
}
blocks_to_read
}
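/// Collapses runs of consecutive block numbers into `DiskBlock` ranges so the
/// disk driver can issue fewer, larger reads. For example (hypothetical
/// input), [100, 101, 102, 200, 201] becomes
/// [{lba: 100, size: 3}, {lba: 200, size: 2}].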
fn run_len_compress_blocks(&self, blocks: Vec<u32>) -> Vec<DiskBlock> {
let mut curr_block = DiskBlock {
lba: blocks[0] as u64,
size: 1,
};
let mut iter = blocks.into_iter();
iter.next();
let mut blocks = Vec::new();
for block in iter {
if block as u64 == (curr_block.lba + curr_block.size) {
curr_block.size += 1;
} else {
blocks.push(curr_block.clone());
curr_block.lba = block as u64;
curr_block.size = 1;
}
}
blocks.push(curr_block);
blocks
}
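/// Reads every data block of `inode` into a single memory capability.
///
/// Block pointer layout handled here (standard ext2): block[0..12] are direct
/// pointers, block[12] is a singly indirect block (up to 256 more pointers
/// under the 1024-byte-block assumption), block[13] is doubly indirect (up to
/// 256 * 256), and block[14] (triply indirect) is not yet supported. The
/// constant 268 below is 12 direct + 256 singly indirect blocks.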
fn read_inode(&mut self, _inode_num: u32, inode: Inode) -> Result<Capability, ZError> {
// TODO: Cache the result of this method using _inode_num.
// TODO: This assumes 512-byte sectors.
let real_block_cnt = (inode.blocks as u64 - 1) / (self.superblock().block_size() / 512) + 1;
if inode.block[14] != 0 {
debug!("Can't handle triply indirect inodes yet.");
return Err(ZError::UNIMPLEMENTED);
}
let mut blocks_to_read = Vec::new();
for i in 0..min(12, real_block_cnt) {
blocks_to_read.push(inode.block[i as usize])
}
// Singly indirect block.
if inode.block[12] != 0 {
let num_blocks = min(256, real_block_cnt - 12) as usize;
blocks_to_read.append(
&mut self.get_blocks_from_single_indirect(inode.block[12] as u64, num_blocks),
);
}
// Doubly indirect block.
if inode.block[13] != 0 {
let num_blocks = min(256 * 256, real_block_cnt - 268) as usize;
blocks_to_read.append(
&mut self.get_blocks_from_double_indirect(inode.block[13] as u64, num_blocks),
);
};
self.reader
.read_many(&self.run_len_compress_blocks(blocks_to_read))
}
fn read_inode_into_mem(
&mut self,
inode_num: u32,
inode: Inode,
) -> Result<MemoryRegion, ZError> {
if !self.inode_cache.contains_key(&inode_num) {
debug!("Cache MISS for inode_num: {}", inode_num);
let inode_cap = self.read_inode(inode_num, inode)?;
self.inode_cache.insert(inode_num, inode_cap);
} else {
debug!("Cache HIT for inode_num: {}", inode_num);
}
MemoryRegion::from_cap(self.inode_cache[&inode_num].duplicate(Capability::PERMS_ALL)?)
}
pub fn read_file(&mut self, inode_num: u32) -> Result<Capability, ZError> {
let inode = self.get_inode(inode_num);
if (inode.mode & 0x8000) == 0 {
debug!("Reading non file.");
return Err(ZError::INVALID_ARGUMENT);
}
self.read_inode(inode_num, inode)
}
pub fn read_directory(&mut self, inode_num: u32) -> Result<Vec<FileInfo>, ZError> {
let inode = self.get_inode(inode_num);
if (inode.mode & 0x4000) == 0 {
let mode = inode.mode;
debug!("Reading non directory. Inode {:?}, Mode {}", inode, mode);
return Err(ZError::INVALID_ARGUMENT);
}
let dir = self.read_inode_into_mem(inode_num, inode)?;
let mut file_names = Vec::new();
let mut offset = 0;
while offset < dir.size() {
let dir_ptr: DirEntry = unsafe { dir.raw_ptr_at_offset::<DirEntry>(offset).read() };
let name = dir_ptr.name;
let file_name: String =
String::from_utf8(name[..dir_ptr.name_len as usize].to_vec()).unwrap();
file_names.push(FileInfo {
inode: dir_ptr.inode,
name: file_name,
});
offset += dir_ptr.record_length as u64;
}
Ok(file_names)
}
}

View file

@ -1,8 +0,0 @@
#![no_std]
extern crate alloc;
mod ext2_driver;
mod types;
pub use ext2_driver::Ext2Driver;

View file

@ -1,126 +0,0 @@
/// Superblock structure.
/// https://www.nongnu.org/ext2-doc/ext2.html#superblock
#[repr(C, packed)]
pub struct Superblock {
pub inodes_count: u32,
pub blocks_count: u32,
pub reserved_blocks_count: u32,
pub free_blocks_count: u32,
pub free_inodes_count: u32,
pub first_data_block: u32,
pub log_block_size: u32,
pub log_frag_size: u32,
pub blocks_per_group: u32,
pub frags_per_group: u32,
pub inodes_per_group: u32,
pub mtime: u32,
pub wtime: u32,
pub mnt_count: u16,
pub max_mnt_count: u16,
pub magic: u16,
pub state: u16,
pub errors: u16,
pub minor_rev_level: u16,
pub lastcheck: u32,
pub checkinterval: u32,
pub creator_os: u32,
pub rev_level: u32,
pub def_resuid: u16,
pub def_resgid: u16,
pub first_ino: u32,
pub inode_size: u16,
}
impl Superblock {
pub fn is_valid(&self) -> bool {
self.magic == 0xEF53
}
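/// Number of 512-byte disk sectors per filesystem block, assuming 512-byte
/// sectors: block_size / 512 = (1024 << log_block_size) / 512
/// = 1 << (log_block_size + 1).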
pub fn sectors_per_block(&self) -> u64 {
1 << (self.log_block_size + 1)
}
pub fn block_size(&self) -> u64 {
1024 << self.log_block_size
}
pub fn bgdt_block_num(&self) -> u64 {
if self.block_size() == 1024 { 2 } else { 1 }
}
pub fn bgdt_block_size(&self) -> u64 {
(self.num_block_groups() * (size_of::<BlockGroupDescriptor>() as u64) - 1)
/ self.block_size()
+ 1
}
pub fn num_block_groups(&self) -> u64 {
(((self.blocks_count - 1) / self.blocks_per_group) + 1) as u64
}
pub fn inode_size(&self) -> u64 {
if self.rev_level >= 1 {
self.inode_size as u64
} else {
const DEFAULT_INODE_SIZE: u64 = 0x80;
DEFAULT_INODE_SIZE
}
}
pub fn inode_table_block_size(&self) -> u64 {
(self.inode_size() * self.inodes_per_group as u64) / self.block_size()
}
}
#[repr(C, packed)]
#[derive(Debug)]
pub struct BlockGroupDescriptor {
pub block_bitmap: u32,
pub inode_bitmap: u32,
pub inode_table: u32,
pub free_blocks_count: u16,
pub free_inodes_count: u16,
pub used_dirs_count: u16,
reserved: [u8; 14],
}
const _: () = assert!(size_of::<BlockGroupDescriptor>() == 32);
#[repr(C, packed)]
#[derive(Clone, Debug)]
pub struct Inode {
pub mode: u16,
pub uid: u16,
pub size: u32,
pub atime: u32,
pub ctime: u32,
pub mtime: u32,
pub dtime: u32,
pub gid: u16,
pub links_count: u16,
pub blocks: u32,
pub flags: u32,
pub osd1: u32,
pub block: [u32; 15],
pub generation: u32,
pub file_acl: u32,
pub dir_acl: u32,
pub faddr: u32,
pub osd2: [u32; 3],
}
const _: () = assert!(size_of::<Inode>() == 128);
#[allow(dead_code)]
pub const EXT2_FT_FILE: u8 = 0x1;
#[allow(dead_code)]
pub const EXT2_FT_DIR: u8 = 0x2;
#[repr(C, packed)]
pub struct DirEntry {
pub inode: u32,
pub record_length: u16,
pub name_len: u8,
pub file_type: u8,
pub name: [u8; 256],
}

View file

@ -1,14 +0,0 @@
[package]
name = "mammoth"
version = "0.1.0"
edition = "2021"
[lib]
name = "mammoth"
[dependencies]
linked_list_allocator = "0.10.5"
[features]
hosted = []
default = ["hosted"]

View file

@ -1,18 +0,0 @@
use std::env;
fn main() {
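// current_dir for a build script is the package directory (for this crate,
// presumably <repo>/rust/lib/mammoth), so popping three components should
// land at the repository root, where builddbg/zion is expected to contain
// the prebuilt zion_stub static library.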
let mut curr_directory = env::current_dir().unwrap();
println!("{:?}", curr_directory);
assert!(curr_directory.pop());
assert!(curr_directory.pop());
assert!(curr_directory.pop());
curr_directory.push("builddbg");
curr_directory.push("zion");
println!(
"cargo:rustc-link-search={}",
curr_directory.to_str().unwrap()
);
println!("cargo:rustc-link-lib=zion_stub");
}

View file

@ -1,576 +0,0 @@
/* automatically generated by rust-bindgen 0.69.4 */
pub const _STDINT_H: u32 = 1;
pub const _FEATURES_H: u32 = 1;
pub const _ISOC95_SOURCE: u32 = 1;
pub const _ISOC99_SOURCE: u32 = 1;
pub const _ISOC11_SOURCE: u32 = 1;
pub const _ISOC2X_SOURCE: u32 = 1;
pub const _POSIX_SOURCE: u32 = 1;
pub const _POSIX_C_SOURCE: u32 = 200809;
pub const _XOPEN_SOURCE: u32 = 700;
pub const _XOPEN_SOURCE_EXTENDED: u32 = 1;
pub const _LARGEFILE64_SOURCE: u32 = 1;
pub const _DEFAULT_SOURCE: u32 = 1;
pub const _ATFILE_SOURCE: u32 = 1;
pub const _DYNAMIC_STACK_SIZE_SOURCE: u32 = 1;
pub const __GLIBC_USE_ISOC2X: u32 = 1;
pub const __USE_ISOC11: u32 = 1;
pub const __USE_ISOC99: u32 = 1;
pub const __USE_ISOC95: u32 = 1;
pub const __USE_ISOCXX11: u32 = 1;
pub const __USE_POSIX: u32 = 1;
pub const __USE_POSIX2: u32 = 1;
pub const __USE_POSIX199309: u32 = 1;
pub const __USE_POSIX199506: u32 = 1;
pub const __USE_XOPEN2K: u32 = 1;
pub const __USE_XOPEN2K8: u32 = 1;
pub const __USE_XOPEN: u32 = 1;
pub const __USE_XOPEN_EXTENDED: u32 = 1;
pub const __USE_UNIX98: u32 = 1;
pub const _LARGEFILE_SOURCE: u32 = 1;
pub const __USE_XOPEN2K8XSI: u32 = 1;
pub const __USE_XOPEN2KXSI: u32 = 1;
pub const __USE_LARGEFILE: u32 = 1;
pub const __USE_LARGEFILE64: u32 = 1;
pub const __WORDSIZE: u32 = 64;
pub const __WORDSIZE_TIME64_COMPAT32: u32 = 1;
pub const __SYSCALL_WORDSIZE: u32 = 64;
pub const __TIMESIZE: u32 = 64;
pub const __USE_MISC: u32 = 1;
pub const __USE_ATFILE: u32 = 1;
pub const __USE_DYNAMIC_STACK_SIZE: u32 = 1;
pub const __USE_GNU: u32 = 1;
pub const __USE_FORTIFY_LEVEL: u32 = 0;
pub const __GLIBC_USE_DEPRECATED_GETS: u32 = 0;
pub const __GLIBC_USE_DEPRECATED_SCANF: u32 = 0;
pub const __GLIBC_USE_C2X_STRTOL: u32 = 1;
pub const _STDC_PREDEF_H: u32 = 1;
pub const __STDC_IEC_559__: u32 = 1;
pub const __STDC_IEC_60559_BFP__: u32 = 201404;
pub const __STDC_IEC_559_COMPLEX__: u32 = 1;
pub const __STDC_IEC_60559_COMPLEX__: u32 = 201404;
pub const __STDC_ISO_10646__: u32 = 201706;
pub const __GNU_LIBRARY__: u32 = 6;
pub const __GLIBC__: u32 = 2;
pub const __GLIBC_MINOR__: u32 = 39;
pub const _SYS_CDEFS_H: u32 = 1;
pub const __glibc_c99_flexarr_available: u32 = 1;
pub const __LDOUBLE_REDIRECTS_TO_FLOAT128_ABI: u32 = 0;
pub const __HAVE_GENERIC_SELECTION: u32 = 0;
pub const __GLIBC_USE_LIB_EXT2: u32 = 1;
pub const __GLIBC_USE_IEC_60559_BFP_EXT: u32 = 1;
pub const __GLIBC_USE_IEC_60559_BFP_EXT_C2X: u32 = 1;
pub const __GLIBC_USE_IEC_60559_EXT: u32 = 1;
pub const __GLIBC_USE_IEC_60559_FUNCS_EXT: u32 = 1;
pub const __GLIBC_USE_IEC_60559_FUNCS_EXT_C2X: u32 = 1;
pub const __GLIBC_USE_IEC_60559_TYPES_EXT: u32 = 1;
pub const _BITS_TYPES_H: u32 = 1;
pub const _BITS_TYPESIZES_H: u32 = 1;
pub const __OFF_T_MATCHES_OFF64_T: u32 = 1;
pub const __INO_T_MATCHES_INO64_T: u32 = 1;
pub const __RLIM_T_MATCHES_RLIM64_T: u32 = 1;
pub const __STATFS_MATCHES_STATFS64: u32 = 1;
pub const __KERNEL_OLD_TIMEVAL_MATCHES_TIMEVAL64: u32 = 1;
pub const __FD_SETSIZE: u32 = 1024;
pub const _BITS_TIME64_H: u32 = 1;
pub const _BITS_WCHAR_H: u32 = 1;
pub const _BITS_STDINT_INTN_H: u32 = 1;
pub const _BITS_STDINT_UINTN_H: u32 = 1;
pub const _BITS_STDINT_LEAST_H: u32 = 1;
pub const INT8_MIN: i32 = -128;
pub const INT16_MIN: i32 = -32768;
pub const INT32_MIN: i32 = -2147483648;
pub const INT8_MAX: u32 = 127;
pub const INT16_MAX: u32 = 32767;
pub const INT32_MAX: u32 = 2147483647;
pub const UINT8_MAX: u32 = 255;
pub const UINT16_MAX: u32 = 65535;
pub const UINT32_MAX: u32 = 4294967295;
pub const INT_LEAST8_MIN: i32 = -128;
pub const INT_LEAST16_MIN: i32 = -32768;
pub const INT_LEAST32_MIN: i32 = -2147483648;
pub const INT_LEAST8_MAX: u32 = 127;
pub const INT_LEAST16_MAX: u32 = 32767;
pub const INT_LEAST32_MAX: u32 = 2147483647;
pub const UINT_LEAST8_MAX: u32 = 255;
pub const UINT_LEAST16_MAX: u32 = 65535;
pub const UINT_LEAST32_MAX: u32 = 4294967295;
pub const INT_FAST8_MIN: i32 = -128;
pub const INT_FAST16_MIN: i64 = -9223372036854775808;
pub const INT_FAST32_MIN: i64 = -9223372036854775808;
pub const INT_FAST8_MAX: u32 = 127;
pub const INT_FAST16_MAX: u64 = 9223372036854775807;
pub const INT_FAST32_MAX: u64 = 9223372036854775807;
pub const UINT_FAST8_MAX: u32 = 255;
pub const UINT_FAST16_MAX: i32 = -1;
pub const UINT_FAST32_MAX: i32 = -1;
pub const INTPTR_MIN: i64 = -9223372036854775808;
pub const INTPTR_MAX: u64 = 9223372036854775807;
pub const UINTPTR_MAX: i32 = -1;
pub const PTRDIFF_MIN: i64 = -9223372036854775808;
pub const PTRDIFF_MAX: u64 = 9223372036854775807;
pub const SIG_ATOMIC_MIN: i32 = -2147483648;
pub const SIG_ATOMIC_MAX: u32 = 2147483647;
pub const SIZE_MAX: i32 = -1;
pub const WINT_MIN: u32 = 0;
pub const WINT_MAX: u32 = 4294967295;
pub const INT8_WIDTH: u32 = 8;
pub const UINT8_WIDTH: u32 = 8;
pub const INT16_WIDTH: u32 = 16;
pub const UINT16_WIDTH: u32 = 16;
pub const INT32_WIDTH: u32 = 32;
pub const UINT32_WIDTH: u32 = 32;
pub const INT64_WIDTH: u32 = 64;
pub const UINT64_WIDTH: u32 = 64;
pub const INT_LEAST8_WIDTH: u32 = 8;
pub const UINT_LEAST8_WIDTH: u32 = 8;
pub const INT_LEAST16_WIDTH: u32 = 16;
pub const UINT_LEAST16_WIDTH: u32 = 16;
pub const INT_LEAST32_WIDTH: u32 = 32;
pub const UINT_LEAST32_WIDTH: u32 = 32;
pub const INT_LEAST64_WIDTH: u32 = 64;
pub const UINT_LEAST64_WIDTH: u32 = 64;
pub const INT_FAST8_WIDTH: u32 = 8;
pub const UINT_FAST8_WIDTH: u32 = 8;
pub const INT_FAST16_WIDTH: u32 = 64;
pub const UINT_FAST16_WIDTH: u32 = 64;
pub const INT_FAST32_WIDTH: u32 = 64;
pub const UINT_FAST32_WIDTH: u32 = 64;
pub const INT_FAST64_WIDTH: u32 = 64;
pub const UINT_FAST64_WIDTH: u32 = 64;
pub const INTPTR_WIDTH: u32 = 64;
pub const UINTPTR_WIDTH: u32 = 64;
pub const INTMAX_WIDTH: u32 = 64;
pub const UINTMAX_WIDTH: u32 = 64;
pub const PTRDIFF_WIDTH: u32 = 64;
pub const SIG_ATOMIC_WIDTH: u32 = 32;
pub const SIZE_WIDTH: u32 = 64;
pub const WCHAR_WIDTH: u32 = 32;
pub const WINT_WIDTH: u32 = 32;
pub type __u_char = ::core::ffi::c_uchar;
pub type __u_short = ::core::ffi::c_ushort;
pub type __u_int = ::core::ffi::c_uint;
pub type __u_long = ::core::ffi::c_ulong;
pub type __int8_t = ::core::ffi::c_schar;
pub type __uint8_t = ::core::ffi::c_uchar;
pub type __int16_t = ::core::ffi::c_short;
pub type __uint16_t = ::core::ffi::c_ushort;
pub type __int32_t = ::core::ffi::c_int;
pub type __uint32_t = ::core::ffi::c_uint;
pub type __int64_t = ::core::ffi::c_long;
pub type __uint64_t = ::core::ffi::c_ulong;
pub type __int_least8_t = __int8_t;
pub type __uint_least8_t = __uint8_t;
pub type __int_least16_t = __int16_t;
pub type __uint_least16_t = __uint16_t;
pub type __int_least32_t = __int32_t;
pub type __uint_least32_t = __uint32_t;
pub type __int_least64_t = __int64_t;
pub type __uint_least64_t = __uint64_t;
pub type __quad_t = ::core::ffi::c_long;
pub type __u_quad_t = ::core::ffi::c_ulong;
pub type __intmax_t = ::core::ffi::c_long;
pub type __uintmax_t = ::core::ffi::c_ulong;
pub type __dev_t = ::core::ffi::c_ulong;
pub type __uid_t = ::core::ffi::c_uint;
pub type __gid_t = ::core::ffi::c_uint;
pub type __ino_t = ::core::ffi::c_ulong;
pub type __ino64_t = ::core::ffi::c_ulong;
pub type __mode_t = ::core::ffi::c_uint;
pub type __nlink_t = ::core::ffi::c_ulong;
pub type __off_t = ::core::ffi::c_long;
pub type __off64_t = ::core::ffi::c_long;
pub type __pid_t = ::core::ffi::c_int;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct __fsid_t {
pub __val: [::core::ffi::c_int; 2usize],
}
pub type __clock_t = ::core::ffi::c_long;
pub type __rlim_t = ::core::ffi::c_ulong;
pub type __rlim64_t = ::core::ffi::c_ulong;
pub type __id_t = ::core::ffi::c_uint;
pub type __time_t = ::core::ffi::c_long;
pub type __useconds_t = ::core::ffi::c_uint;
pub type __suseconds_t = ::core::ffi::c_long;
pub type __suseconds64_t = ::core::ffi::c_long;
pub type __daddr_t = ::core::ffi::c_int;
pub type __key_t = ::core::ffi::c_int;
pub type __clockid_t = ::core::ffi::c_int;
pub type __timer_t = *mut ::core::ffi::c_void;
pub type __blksize_t = ::core::ffi::c_long;
pub type __blkcnt_t = ::core::ffi::c_long;
pub type __blkcnt64_t = ::core::ffi::c_long;
pub type __fsblkcnt_t = ::core::ffi::c_ulong;
pub type __fsblkcnt64_t = ::core::ffi::c_ulong;
pub type __fsfilcnt_t = ::core::ffi::c_ulong;
pub type __fsfilcnt64_t = ::core::ffi::c_ulong;
pub type __fsword_t = ::core::ffi::c_long;
pub type __ssize_t = ::core::ffi::c_long;
pub type __syscall_slong_t = ::core::ffi::c_long;
pub type __syscall_ulong_t = ::core::ffi::c_ulong;
pub type __loff_t = __off64_t;
pub type __caddr_t = *mut ::core::ffi::c_char;
pub type __intptr_t = ::core::ffi::c_long;
pub type __socklen_t = ::core::ffi::c_uint;
pub type __sig_atomic_t = ::core::ffi::c_int;
pub type int_least8_t = __int_least8_t;
pub type int_least16_t = __int_least16_t;
pub type int_least32_t = __int_least32_t;
pub type int_least64_t = __int_least64_t;
pub type uint_least8_t = __uint_least8_t;
pub type uint_least16_t = __uint_least16_t;
pub type uint_least32_t = __uint_least32_t;
pub type uint_least64_t = __uint_least64_t;
pub type int_fast8_t = ::core::ffi::c_schar;
pub type int_fast16_t = ::core::ffi::c_long;
pub type int_fast32_t = ::core::ffi::c_long;
pub type int_fast64_t = ::core::ffi::c_long;
pub type uint_fast8_t = ::core::ffi::c_uchar;
pub type uint_fast16_t = ::core::ffi::c_ulong;
pub type uint_fast32_t = ::core::ffi::c_ulong;
pub type uint_fast64_t = ::core::ffi::c_ulong;
pub type intmax_t = __intmax_t;
pub type uintmax_t = __uintmax_t;
pub type z_err_t = u64;
pub const kZionProcessExit: u64 = 1;
pub const kZionProcessSpawn: u64 = 2;
pub const kZionProcessWait: u64 = 3;
pub const kZionThreadCreate: u64 = 16;
pub const kZionThreadStart: u64 = 17;
pub const kZionThreadExit: u64 = 18;
pub const kZionThreadWait: u64 = 19;
pub const kZionThreadSleep: u64 = 20;
pub const kZionAddressSpaceMap: u64 = 33;
pub const kZionAddressSpaceUnmap: u64 = 34;
pub const kZionMemoryObjectCreate: u64 = 48;
pub const kZionMemoryObjectCreatePhysical: u64 = 49;
pub const kZionMemoryObjectCreateContiguous: u64 = 50;
pub const kZionMemoryObjectDuplicate: u64 = 56;
pub const kZionMemoryObjectInspect: u64 = 57;
pub const kZionChannelCreate: u64 = 64;
pub const kZionChannelSend: u64 = 65;
pub const kZionChannelRecv: u64 = 66;
pub const kZionChannelSendRecv: u64 = 67;
pub const kZionPortCreate: u64 = 80;
pub const kZionPortSend: u64 = 81;
pub const kZionPortRecv: u64 = 82;
pub const kZionPortPoll: u64 = 83;
pub const kZionIrqRegister: u64 = 88;
pub const kZionMsiIrqRegister: u64 = 89;
pub const kZionEndpointCreate: u64 = 96;
pub const kZionEndpointSend: u64 = 97;
pub const kZionEndpointRecv: u64 = 98;
pub const kZionReplyPortSend: u64 = 99;
pub const kZionReplyPortRecv: u64 = 100;
pub const kZionEndpointCall: u64 = 101;
pub const kZionCapDuplicate: u64 = 112;
pub const kZionCapRelease: u64 = 113;
pub const kZionMutexCreate: u64 = 128;
pub const kZionMutexLock: u64 = 129;
pub const kZionMutexRelease: u64 = 130;
pub const kZionSemaphoreCreate: u64 = 131;
pub const kZionSemaphoreWait: u64 = 132;
pub const kZionSemaphoreSignal: u64 = 133;
pub const kZionDebug: u64 = 65536;
pub const kZIrqKbd: u64 = 34;
pub const kZIrqPci1: u64 = 48;
pub const kZIrqPci2: u64 = 49;
pub const kZIrqPci3: u64 = 50;
pub const kZIrqPci4: u64 = 51;
pub type z_cap_t = u64;
pub type z_perm_t = u64;
pub const kZionInvalidCapability: u64 = 0;
pub const kZionPerm_Write: u64 = 1;
pub const kZionPerm_Read: u64 = 2;
pub const kZionPerm_Transmit: u64 = 16;
pub const kZionPerm_Duplicate: u64 = 32;
pub const kZionPerm_SpawnProcess: u64 = 256;
pub const kZionPerm_SpawnThread: u64 = 512;
pub const kZionPerm_Lock: u64 = 256;
pub const kZionPerm_Release: u64 = 512;
pub const kZionPerm_Wait: u64 = 256;
pub const kZionPerm_Signal: u64 = 512;
pub const kZionPerm_None: z_perm_t = 0;
pub const kZionPerm_All: z_perm_t = 18446744073709551615;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZFramebufferInfo {
pub address_phys: u64,
pub width: u64,
pub height: u64,
pub pitch: u64,
pub bpp: u16,
pub memory_model: u8,
pub red_mask_size: u8,
pub red_mask_shift: u8,
pub green_mask_size: u8,
pub green_mask_shift: u8,
pub blue_mask_size: u8,
pub blue_mask_shift: u8,
}
extern "C" {
#[link_name = "\u{1}_Z8SysCall1mPKv"]
pub fn SysCall1(code: u64, req: *const ::core::ffi::c_void) -> z_err_t;
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZProcessExitReq {
pub code: u64,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZProcessSpawnReq {
pub proc_cap: z_cap_t,
pub bootstrap_cap: z_cap_t,
pub new_proc_cap: *mut z_cap_t,
pub new_vmas_cap: *mut z_cap_t,
pub new_bootstrap_cap: *mut z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZProcessWaitReq {
pub proc_cap: z_cap_t,
pub exit_code: *mut z_err_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZThreadCreateReq {
pub proc_cap: z_cap_t,
pub thread_cap: *mut z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZThreadStartReq {
pub thread_cap: z_cap_t,
pub entry: u64,
pub arg1: u64,
pub arg2: u64,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZThreadExitReq {}
extern "C" {
#[link_name = "\u{1}_Z11ZThreadExitv"]
pub fn ZThreadExit() -> z_err_t;
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZThreadWaitReq {
pub thread_cap: z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZThreadSleepReq {
pub millis: u64,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZAddressSpaceMapReq {
pub vmas_cap: z_cap_t,
pub vmas_offset: u64,
pub vmmo_cap: z_cap_t,
pub align: u64,
pub vaddr: *mut u64,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZAddressSpaceUnmapReq {
pub vmas_cap: z_cap_t,
pub lower_addr: u64,
pub upper_addr: u64,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZMemoryObjectCreateReq {
pub size: u64,
pub vmmo_cap: *mut z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZMemoryObjectCreatePhysicalReq {
pub paddr: u64,
pub size: u64,
pub vmmo_cap: *mut z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZMemoryObjectCreateContiguousReq {
pub size: u64,
pub vmmo_cap: *mut z_cap_t,
pub paddr: *mut u64,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZMemoryObjectDuplicateReq {
pub vmmo_cap: z_cap_t,
pub base_offset: u64,
pub length: u64,
pub new_vmmo_cap: *mut z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZMemoryObjectInspectReq {
pub vmmo_cap: z_cap_t,
pub size: *mut u64,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZChannelCreateReq {
pub channel1: *mut z_cap_t,
pub channel2: *mut z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZChannelSendReq {
pub chan_cap: z_cap_t,
pub num_bytes: u64,
pub data: *const ::core::ffi::c_void,
pub num_caps: u64,
pub caps: *mut z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZChannelRecvReq {
pub chan_cap: z_cap_t,
pub num_bytes: *mut u64,
pub data: *mut ::core::ffi::c_void,
pub num_caps: *mut u64,
pub caps: *mut z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZPortCreateReq {
pub port_cap: *mut z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZPortSendReq {
pub port_cap: z_cap_t,
pub num_bytes: u64,
pub data: *const ::core::ffi::c_void,
pub num_caps: u64,
pub caps: *mut z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZPortRecvReq {
pub port_cap: z_cap_t,
pub num_bytes: *mut u64,
pub data: *mut ::core::ffi::c_void,
pub num_caps: *mut u64,
pub caps: *mut z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZPortPollReq {
pub port_cap: z_cap_t,
pub num_bytes: *mut u64,
pub data: *mut ::core::ffi::c_void,
pub num_caps: *mut u64,
pub caps: *mut z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZIrqRegisterReq {
pub irq_num: u64,
pub port_cap: *mut z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZMsiIrqRegisterReq {
pub irq_num: *mut u64,
pub port_cap: *mut z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZEndpointCreateReq {
pub endpoint_cap: *mut z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZEndpointSendReq {
pub endpoint_cap: z_cap_t,
pub num_bytes: u64,
pub data: *const ::core::ffi::c_void,
pub num_caps: u64,
pub caps: *const z_cap_t,
pub reply_port_cap: *mut z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZEndpointRecvReq {
pub endpoint_cap: z_cap_t,
pub num_bytes: *mut u64,
pub data: *mut ::core::ffi::c_void,
pub num_caps: *mut u64,
pub caps: *mut z_cap_t,
pub reply_port_cap: *mut z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZReplyPortSendReq {
pub reply_port_cap: z_cap_t,
pub num_bytes: u64,
pub data: *const ::core::ffi::c_void,
pub num_caps: u64,
pub caps: *const z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZReplyPortRecvReq {
pub reply_port_cap: z_cap_t,
pub num_bytes: *mut u64,
pub data: *mut ::core::ffi::c_void,
pub num_caps: *mut u64,
pub caps: *mut z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZCapDuplicateReq {
pub cap_in: z_cap_t,
pub perm_mask: z_perm_t,
pub cap_out: *mut z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZCapReleaseReq {
pub cap: z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZMutexCreateReq {
pub mutex_cap: *mut z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZMutexLockReq {
pub mutex_cap: z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZMutexReleaseReq {
pub mutex_cap: z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZSemaphoreCreateReq {
pub semaphore_cap: *mut z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZSemaphoreWaitReq {
pub semaphore_cap: z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZSemaphoreSignalReq {
pub semaphore_cap: z_cap_t,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZDebugReq {
pub message: *const ::core::ffi::c_char,
pub size: u64,
}

View file

@ -1,48 +0,0 @@
use crate::cap_syscall;
use crate::zion::{z_cap_t, ZError};
pub struct Capability {
cap: z_cap_t,
}
impl Capability {
pub fn take(cap: z_cap_t) -> Self {
Self { cap }
}
pub fn take_copy(cap: z_cap_t) -> Result<Self, ZError> {
Ok(Self::take(cap_syscall::cap_duplicate(
cap,
Self::PERMS_ALL,
)?))
}
pub fn raw(&self) -> z_cap_t {
self.cap
}
pub fn release(mut self) -> z_cap_t {
let cap = self.cap;
self.cap = 0;
cap
}
pub const PERMS_ALL: u64 = u64::MAX;
pub fn duplicate(&self, perm_mask: u64) -> Result<Self, ZError> {
Ok(Self::take(cap_syscall::cap_duplicate(self.cap, perm_mask)?))
}
}
impl Drop for Capability {
fn drop(&mut self) {
if self.cap != 0 {
if let Err(e) = cap_syscall::cap_release(self.cap) {
crate::debug!(
"WARN: error during cap release for cap {:#x}: {:?}",
self.cap,
e
);
}
}
}
}

View file

@ -1,30 +0,0 @@
use core::ffi::c_void;
use crate::zion::{self, z_cap_t, ZError};
fn syscall<T>(id: u64, req: &T) -> Result<(), ZError> {
unsafe {
let resp = zion::SysCall1(id, req as *const T as *const c_void);
if resp != 0 {
return Err(zion::ZError::from(resp));
}
}
Ok(())
}
pub fn cap_duplicate(cap: z_cap_t, perm_mask: u64) -> Result<z_cap_t, ZError> {
let mut new_cap = 0;
syscall(
zion::kZionCapDuplicate,
&zion::ZCapDuplicateReq {
cap_in: cap,
perm_mask,
cap_out: &mut new_cap,
},
)?;
Ok(new_cap)
}
pub fn cap_release(cap: z_cap_t) -> Result<(), ZError> {
syscall(zion::kZionCapRelease, &zion::ZCapReleaseReq { cap })
}

View file

@ -1,343 +0,0 @@
use crate::cap::Capability;
use crate::init;
use crate::syscall;
use crate::zion::ZError;
use alloc::fmt::Debug;
const ELF_MAGIC: u32 = 0x464C457F;
const ELF_IDENT_32BIT: u8 = 0x1;
const ELF_IDENT_64BIT: u8 = 0x2;
const ELF_ENDIAN_LITTLE: u8 = 0x1;
const ELF_ENDIAN_BIG: u8 = 0x2;
const ELF_VERSION_CURRENT: u8 = 0x1;
const ELF_ABI_SYSV: u8 = 0x0;
const ELF_ABI_LINUX: u8 = 0x3;
const ELF_FILE_RELOC: u16 = 0x1;
const ELF_FILE_EXEC: u16 = 0x2;
const ELF_FILE_DYN: u16 = 0x3;
const ELF_FILE_CORE: u16 = 0x4;
const ELF_MACH_X86: u16 = 0x3;
const ELF_MACH_AMD64: u16 = 0x3e;
#[repr(C, packed(1))]
// Header spec from wikipedia: https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#ELF_header
struct Elf64Header {
// 0x7F454C46 (0x7F followed by 'ELF')
magic: u32,
// 1 for 32 bit, 2 for 64 bit.
elf_class: u8,
// 1 for little, 2 for big.
endianess: u8,
// Current version is 1.
ident_version: u8,
// Target OS abi.
abi: u8,
abi_version: u8,
ident_padding: [u8; 7],
file_type: u16,
machine: u16,
version: u32,
entry: u64,
program_header_offset: u64,
section_header_offset: u64,
flags: u32,
header_size: u16,
program_header_entry_size: u16,
program_header_count: u16,
section_header_entry_size: u16,
section_header_entry_number: u16,
section_header_str_index: u16,
}
impl Debug for Elf64Header {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
let magic = self.magic;
let elf_class = match self.elf_class {
ELF_IDENT_32BIT => "32 bit",
ELF_IDENT_64BIT => "64 bit",
_ => "Unknown",
};
let endianess = match self.endianess {
ELF_ENDIAN_LITTLE => "Little",
ELF_ENDIAN_BIG => "Big",
_ => "Unknown",
};
let ident_version = self.ident_version;
let version = self.version;
f.write_fmt(format_args!(
"ELF Header Magic: {:#x}, Class: {}, Endianess: {}, Version (ident): {}, Version: {}\n",
magic, elf_class, endianess, ident_version, version,
))?;
let abi = match self.abi {
ELF_ABI_SYSV => "SYSV",
ELF_ABI_LINUX => "Linux",
_ => "Unknown",
};
let abi_version = self.abi_version;
f.write_fmt(format_args!("\tABI: {}, Version: {}\n", abi, abi_version))?;
let file_type = match self.file_type {
ELF_FILE_EXEC => "Executable",
ELF_FILE_RELOC => "Relocatable",
ELF_FILE_DYN => "Shared Obj",
ELF_FILE_CORE => "Core file",
_ => "Unknown",
};
let machine = match self.machine {
ELF_MACH_X86 => "x86 (32bit)",
ELF_MACH_AMD64 => "x86-64",
_ => "Unknown",
};
let entry_point = self.entry;
f.write_fmt(format_args!(
"\tFile type: {}, Machine Arch: {}, Entry point {:#x}",
file_type, machine, entry_point
))
}
}
fn validate_header(elf_header: &Elf64Header) -> Result<(), ZError> {
if elf_header.magic != ELF_MAGIC {
let magic = elf_header.magic;
debug!(
"Elf header incorrect got {:#x} expected {:#x}",
magic, ELF_MAGIC
);
return Err(ZError::INVALID_ARGUMENT);
}
if elf_header.elf_class != ELF_IDENT_64BIT {
let class = elf_header.elf_class;
debug!(
"Elf class must be {} for 64 bit, got: {}",
ELF_IDENT_64BIT, class
);
return Err(ZError::INVALID_ARGUMENT);
}
if elf_header.endianess != ELF_ENDIAN_LITTLE {
let endianess = elf_header.endianess;
debug!(
"Elf endianess must be {} for little, got: {}",
ELF_ENDIAN_LITTLE, endianess
);
return Err(ZError::INVALID_ARGUMENT);
}
if elf_header.ident_version != ELF_VERSION_CURRENT {
let version = elf_header.ident_version;
debug!(
"Elf version (ident) must be {}, got: {}",
ELF_VERSION_CURRENT, version
);
return Err(ZError::INVALID_ARGUMENT);
}
if elf_header.file_type != ELF_FILE_EXEC {
let file_type = elf_header.file_type;
debug!(
"Elf file type must be {} for executable, got {:x}",
ELF_FILE_EXEC, file_type
);
return Err(ZError::INVALID_ARGUMENT);
}
Ok(())
}
const ELF_PROG_NULL: u32 = 0x0;
const ELF_PROG_LOAD: u32 = 0x1;
const ELF_PROG_DYNAMIC: u32 = 0x2;
const ELF_PROG_INTERP: u32 = 0x3;
const ELF_PROG_NOTE: u32 = 0x4;
const ELF_PROG_SHLIB: u32 = 0x5;
const ELF_PROG_PTHDR: u32 = 0x6;
const ELF_PROG_THREAD_LOCAL: u32 = 0x7;
// Stack unwind tables.
const ELF_PROG_GNU_EH_FRAME: u32 = 0x6474e550;
const ELF_PROG_GNU_STACK: u32 = 0x6474e551;
const ELF_PROG_GNU_RELRO: u32 = 0x6474e552;
#[repr(C, packed(1))]
struct Elf64ProgramHeader {
prog_type: u32,
flags: u32,
offset: u64,
vaddr: u64,
paddr: u64,
file_size: u64,
mem_size: u64,
align: u64,
}
impl Debug for Elf64ProgramHeader {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
let prog_type = match self.prog_type {
ELF_PROG_NULL => "NULL",
ELF_PROG_LOAD => "LOAD",
ELF_PROG_DYNAMIC => "DYNAMIC",
ELF_PROG_INTERP => "INTERP",
ELF_PROG_NOTE => "NOTE",
ELF_PROG_SHLIB => "SHARED LIB",
ELF_PROG_PTHDR => "PROG TABLE HEADER",
ELF_PROG_THREAD_LOCAL => "THREAD LOCAL",
ELF_PROG_GNU_EH_FRAME => "GNU EH FRAME",
ELF_PROG_GNU_RELRO => "GNU RELOCATABLE",
ELF_PROG_GNU_STACK => "GNU STACK",
_ => "UNKNOWN",
};
let offset = self.offset;
let vaddr = self.vaddr;
let paddr = self.paddr;
let file_size = self.file_size;
let mem_size = self.mem_size;
let align = self.align;
f.write_fmt(format_args!(
"Type: {}, offset: {:#x}, vaddr: {:#x}, paddr: {:#x}, file_size: {:#x}, mem_size: {:#x}, align: {:#x}",
prog_type, offset, vaddr, paddr, file_size, mem_size, align
))
}
}
fn load_program_segment(
prog_header: &Elf64ProgramHeader,
file: &[u8],
vmas: &Capability,
) -> Result<(), ZError> {
debug!("{:?}", prog_header);
match prog_header.prog_type {
ELF_PROG_NULL
| ELF_PROG_NOTE
| ELF_PROG_PTHDR
| ELF_PROG_GNU_STACK
| ELF_PROG_GNU_RELRO
| ELF_PROG_GNU_EH_FRAME => Ok(()),
ELF_PROG_LOAD => {
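// The segment's virtual address need not be page aligned. A hypothetical
// example: vaddr 0x40_1234 gives page_offset 0x234, so a region of
// page_offset + mem_size bytes is allocated, the file bytes are copied in
// starting at offset 0x234, and the region is mapped at 0x40_1000.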
let page_offset = prog_header.vaddr & 0xFFF;
let mem_size = page_offset + prog_header.mem_size;
let mut mem_object = crate::mem::MemoryRegion::new(mem_size)?;
for i in mem_object.mut_slice() {
*i = 0;
}
let file_start = prog_header.offset as usize;
let file_end = file_start + prog_header.file_size as usize;
let from_slice = &file[file_start..file_end];
let mem_start = page_offset as usize;
let mem_end = mem_start + (prog_header.file_size as usize);
let to_slice: &mut [u8] = &mut mem_object.mut_slice()[mem_start..mem_end];
to_slice.copy_from_slice(from_slice);
let vaddr = prog_header.vaddr - page_offset;
syscall::address_space_map_external(vmas, mem_object.cap(), vaddr)
}
ELF_PROG_DYNAMIC => {
debug!("Unimplemented dynamic elf sections.");
Err(ZError::UNIMPLEMENTED)
}
ELF_PROG_INTERP => {
debug!("Unimplemented interpreter elf sections.");
Err(ZError::UNIMPLEMENTED)
}
ELF_PROG_SHLIB => {
debug!("Unimplemented shared lib elf sections.");
Err(ZError::UNIMPLEMENTED)
}
ELF_PROG_THREAD_LOCAL => {
debug!("Unimplemented thread local elf sections.");
Err(ZError::UNIMPLEMENTED)
}
_ => {
let prog_type = prog_header.prog_type;
debug!("Unknown elf program header type: {:#x}", prog_type);
Err(ZError::UNKNOWN)
}
}
}
fn load_elf_program(elf_file: &[u8], vmas: &Capability) -> Result<u64, ZError> {
assert!(elf_file.len() > size_of::<Elf64Header>());
let header: &Elf64Header = unsafe {
elf_file
.as_ptr()
.cast::<Elf64Header>()
.as_ref()
.ok_or(ZError::NULL_PTR)?
};
debug!("{:?}", header);
validate_header(header)?;
for prog_ind in 0..header.program_header_count {
let prog_header_offset = header.program_header_offset
+ ((prog_ind as u64) * (header.program_header_entry_size as u64));
let prog_header_end = prog_header_offset + header.program_header_entry_size as u64;
let prog_header_slice = &elf_file[prog_header_offset as usize..prog_header_end as usize];
let prog_header: &Elf64ProgramHeader = unsafe {
prog_header_slice
.as_ptr()
.cast::<Elf64ProgramHeader>()
.as_ref()
.ok_or(ZError::NULL_PTR)?
};
load_program_segment(prog_header, elf_file, vmas)?;
}
Ok(header.entry)
}
pub fn spawn_process_from_elf_and_init(
elf_file: &[u8],
init_cap: Capability,
) -> Result<Capability, ZError> {
let self_cap = Capability::take_copy(unsafe { init::SELF_PROC_CAP })?;
let port_cap = syscall::port_create()?;
let (new_proc_cap, new_as_cap, foreign_port_id) =
syscall::process_spawn(&self_cap, port_cap.duplicate(Capability::PERMS_ALL)?)?;
let entry_point = load_elf_program(elf_file, &new_as_cap)?;
let port = crate::port::PortClient::take_from(port_cap);
port.write_u64_and_cap(
crate::init::Z_INIT_SELF_PROC,
new_proc_cap.duplicate(Capability::PERMS_ALL)?,
)?;
port.write_u64_and_cap(crate::init::Z_INIT_SELF_VMAS, new_as_cap)?;
port.write_u64_and_cap(crate::init::Z_INIT_ENDPOINT, init_cap)?;
let thread_cap = syscall::thread_create(&new_proc_cap)?;
syscall::thread_start(&thread_cap, entry_point, foreign_port_id, 0)?;
Ok(new_proc_cap)
}
pub fn spawn_process_from_elf(elf_file: &[u8]) -> Result<Capability, ZError> {
let yellowstone = Capability::take_copy(unsafe { crate::init::INIT_ENDPOINT })?;
spawn_process_from_elf_and_init(elf_file, yellowstone)
}

View file

@ -1,50 +0,0 @@
use crate::cap::Capability;
use crate::syscall;
use crate::zion::z_cap_t;
// From /zion/include/ztypes.h
pub const Z_INIT_SELF_PROC: u64 = 0x4000_0000;
pub const Z_INIT_SELF_VMAS: u64 = 0x4000_0001;
pub const Z_INIT_ENDPOINT: u64 = 0x4100_0000;
const Z_BOOT_DENALI_VMMO: u64 = 0x4200_0000;
const Z_BOOT_VICTORIA_FALLS_VMMO: u64 = 0x4200_0001;
const Z_BOOT_PCI_VMMO: u64 = 0x4200_0002;
const Z_BOOT_FRAMEBUFFER_INFO_VMMO: u64 = 0x4200_0003;
pub static mut SELF_PROC_CAP: z_cap_t = 0;
pub static mut SELF_VMAS_CAP: z_cap_t = 0;
pub static mut INIT_ENDPOINT: z_cap_t = 0;
// Boot capabilities are generally only passed to yellowstone.
pub static mut BOOT_DENALI_VMMO: z_cap_t = 0;
pub static mut BOOT_VICTORIA_FALLS_VMMO: z_cap_t = 0;
pub static mut BOOT_PCI_VMMO: z_cap_t = 0;
pub static mut BOOT_FRAMEBUFFER_INFO_VMMO: z_cap_t = 0;
pub fn parse_init_port(port_cap: z_cap_t) {
let init_port = Capability::take(port_cap);
loop {
let mut bytes: [u8; 8] = [0; 8];
let mut caps: [u64; 1] = [0];
let resp = syscall::port_poll(&init_port, &mut bytes, &mut caps);
if resp.is_err() {
break;
}
let init_sig = u64::from_le_bytes(bytes);
unsafe {
match init_sig {
Z_INIT_SELF_PROC => SELF_PROC_CAP = caps[0],
Z_INIT_SELF_VMAS => SELF_VMAS_CAP = caps[0],
Z_INIT_ENDPOINT => INIT_ENDPOINT = caps[0],
Z_BOOT_DENALI_VMMO => BOOT_DENALI_VMMO = caps[0],
Z_BOOT_VICTORIA_FALLS_VMMO => BOOT_VICTORIA_FALLS_VMMO = caps[0],
Z_BOOT_PCI_VMMO => BOOT_PCI_VMMO = caps[0],
Z_BOOT_FRAMEBUFFER_INFO_VMMO => BOOT_FRAMEBUFFER_INFO_VMMO = caps[0],
_ => syscall::debug("Unknown Cap in Init"),
}
}
}
}

View file

@ -1,21 +0,0 @@
#![no_std]
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
extern crate alloc;
#[macro_use]
pub mod macros;
pub mod cap;
mod cap_syscall;
pub mod elf;
pub mod init;
pub mod mem;
pub mod port;
pub mod sync;
pub mod syscall;
pub mod task;
pub mod thread;
pub mod zion;

View file

@ -1,61 +0,0 @@
use alloc::string::String;
use alloc::vec::Vec;
use core::fmt;
#[derive(Default)]
pub struct Writer {
int_vec: Vec<u8>,
}
impl Writer {
pub fn new() -> Self {
Writer::default()
}
}
impl From<Writer> for String {
fn from(value: Writer) -> Self {
String::from_utf8(value.int_vec).expect("Failed to convert")
}
}
impl fmt::Write for Writer {
fn write_str(&mut self, s: &str) -> fmt::Result {
for c in s.bytes() {
self.int_vec.push(c);
}
fmt::Result::Ok(())
}
}
#[macro_export]
macro_rules! debug {
() => {
$crate::syscall::debug("");
};
($fmt:literal) => {
$crate::syscall::debug($fmt);
};
($fmt:literal, $($val:expr),+) => {{
use core::fmt::Write as _;
let mut w = $crate::macros::Writer::new();
write!(&mut w, $fmt, $($val),*).expect("Failed to format");
let s: alloc::string::String = w.into();
$crate::syscall::debug(&s);
}};
}
#[macro_export]
macro_rules! define_entry {
() => {
#[no_mangle]
pub extern "C" fn _start(init_port: $crate::zion::z_cap_t) -> ! {
$crate::init::parse_init_port(init_port);
$crate::mem::init_heap();
let resp = main();
$crate::syscall::process_exit(resp);
}
};
}
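A minimal binary sketch (hypothetical, not part of this change set) showing how define_entry! and debug! are intended to be used together; it assumes a no_std userspace crate built against mammoth:
#![no_std]
#![no_main] // assumed: the _start generated by define_entry! replaces the normal Rust entry point
extern crate alloc;

mammoth::define_entry!();

fn main() -> u64 {
    // The formatted form of debug! allocates, so it relies on init_heap()
    // having run inside the generated _start.
    mammoth::debug!("hello from {}", "userspace");
    0
}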

View file

@ -1,197 +0,0 @@
use crate::cap::Capability;
use crate::syscall;
use crate::zion::ZError;
use alloc::slice;
use core::fmt::Debug;
use core::ptr::{addr_of, addr_of_mut};
#[cfg(feature = "hosted")]
use linked_list_allocator::LockedHeap;
#[cfg(feature = "hosted")]
#[global_allocator]
static ALLOCATOR: LockedHeap = LockedHeap::empty();
pub static mut CAN_ALLOC: bool = false;
#[cfg(feature = "hosted")]
pub fn init_heap() {
// 1 MiB
let size = 0x10_0000;
let vmmo_cap = syscall::memory_object_create(size).expect("Failed to create memory object");
let vaddr = syscall::address_space_map(&vmmo_cap).expect("Failed to map memory object");
unsafe {
ALLOCATOR.lock().init(vaddr as *mut u8, size as usize);
CAN_ALLOC = true;
}
}
pub struct MemoryRegion {
mem_cap: Capability,
virt_addr: u64,
size: u64,
}
impl MemoryRegion {
pub fn direct_physical(paddr: u64, size: u64) -> Result<Self, ZError> {
let mem_cap = syscall::memory_object_direct_physical(paddr, size)?;
let virt_addr = syscall::address_space_map(&mem_cap)?;
Ok(Self {
mem_cap,
virt_addr,
size,
})
}
pub fn contiguous_physical(size: u64) -> Result<(Self, u64), ZError> {
let (mem_cap, paddr) = syscall::memory_object_contiguous_physical(size)?;
let virt_addr = syscall::address_space_map(&mem_cap)?;
Ok((
Self {
mem_cap,
virt_addr,
size,
},
paddr,
))
}
pub fn from_cap(mem_cap: Capability) -> Result<Self, ZError> {
let virt_addr = syscall::address_space_map(&mem_cap)?;
let size = syscall::memory_object_inspect(&mem_cap)?;
Ok(Self {
mem_cap,
virt_addr,
size,
})
}
pub fn new(size: u64) -> Result<Self, ZError> {
let mem_cap = syscall::memory_object_create(size)?;
let virt_addr = syscall::address_space_map(&mem_cap)?;
Ok(Self {
mem_cap,
virt_addr,
size,
})
}
pub fn slice<T>(&self) -> &[T] {
unsafe {
slice::from_raw_parts(
self.virt_addr as *const T,
self.size as usize / size_of::<T>(),
)
}
}
pub fn mut_slice<T>(&mut self) -> &mut [T] {
unsafe {
slice::from_raw_parts_mut(
self.virt_addr as *mut T,
self.size as usize / size_of::<T>(),
)
}
}
pub fn raw_ptr_at_offset<T>(&self, offset: u64) -> *const T {
// TODO: Come up with a better safety check here. This only verifies that a
// read of size_of::<T>() bytes starting at `offset` stays within the region.
assert!(offset + size_of::<T>() as u64 <= self.size);
(self.virt_addr + offset) as *const T
}
pub fn cap(&self) -> &Capability {
&self.mem_cap
}
pub fn size(&self) -> u64 {
self.size
}
pub fn duplicate(&self, offset: u64, length: u64) -> Result<Capability, ZError> {
syscall::memory_obj_duplicate(&self.mem_cap, offset, length)
}
}
impl<T> AsRef<T> for MemoryRegion {
fn as_ref(&self) -> &T {
unsafe { (self.virt_addr as *const T).as_ref().unwrap() }
}
}
impl<T> AsMut<T> for MemoryRegion {
fn as_mut(&mut self) -> &mut T {
unsafe { (self.virt_addr as *mut T).as_mut().unwrap() }
}
}
impl Drop for MemoryRegion {
fn drop(&mut self) {
// FIXME: We shouldn't have to do this manual adjustment.
let mut max = self.virt_addr + self.size;
if (max & 0xFFF) != 0 {
max += 0x1000 - (max & 0xFFF);
}
syscall::address_space_unmap(self.virt_addr, max).expect("Failed to unmap memory");
}
}
pub struct Volatile<T> {
/// TODO: This should maybe be MaybeUninit.
data: T,
}
impl<T> Volatile<T> {
pub fn read(&self) -> T
where
T: Copy,
{
unsafe { addr_of!(self.data).cast::<T>().read_volatile() }
}
pub fn write(&mut self, data: T) {
unsafe {
addr_of_mut!(self.data).cast::<T>().write_volatile(data);
}
}
pub fn update<F>(&mut self, func: F)
where
T: Copy,
F: Fn(&mut T),
{
let mut data = self.read();
func(&mut data);
self.write(data);
}
}
impl<T> Debug for Volatile<T>
where
T: Debug + Copy,
{
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "{:?}", self.read())
}
}
pub fn map_cap_and_leak(mem_cap: Capability) -> u64 {
let vaddr = syscall::address_space_map(&mem_cap).unwrap();
mem_cap.release();
vaddr
}
pub fn map_direct_physical_and_leak(paddr: u64, size: u64) -> u64 {
let mem_cap = syscall::memory_object_direct_physical(paddr, size).unwrap();
let vaddr = syscall::address_space_map(&mem_cap).unwrap();
mem_cap.release();
vaddr
}
pub fn map_physical_and_leak(size: u64) -> (u64, u64) {
let (mem_cap, paddr) = syscall::memory_object_contiguous_physical(size).unwrap();
let vaddr = syscall::address_space_map(&mem_cap).unwrap();
mem_cap.release();
(vaddr, paddr)
}
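A short usage sketch (hypothetical, not part of this change set) for MemoryRegion; the size and fill pattern are arbitrary:
// Allocate a fresh page-sized region, fill it, and mint a capability
// covering just that page for handing to another process.
fn make_shared_buffer() -> Result<mammoth::cap::Capability, mammoth::zion::ZError> {
    let mut region = mammoth::mem::MemoryRegion::new(0x1000)?;
    for byte in region.mut_slice::<u8>() {
        *byte = 0xAA;
    }
    // Duplicate a capability that exposes only the first page.
    region.duplicate(0, 0x1000)
}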

View file

@ -1,68 +0,0 @@
use crate::cap::Capability;
use crate::syscall::{port_create, port_recv};
use crate::zion::{kZionPerm_Read, z_cap_t, ZError};
pub struct PortServer {
port_cap: Capability,
}
impl PortServer {
pub fn new() -> Result<Self, ZError> {
Ok(Self {
port_cap: port_create()?,
})
}
pub fn from_cap(port_cap: Capability) -> Self {
Self { port_cap }
}
pub fn create_client_cap(&self) -> Result<z_cap_t, ZError> {
self.port_cap
.duplicate(!kZionPerm_Read)
.map(|c| c.release())
}
pub fn recv_byte(&self) -> Result<u8, ZError> {
let mut caps: [z_cap_t; 0] = [];
let mut bytes: [u8; 1] = [0];
port_recv(&self.port_cap, &mut bytes, &mut caps)?;
Ok(bytes[0])
}
pub fn recv_u16(&self) -> Result<u16, ZError> {
let mut caps: [z_cap_t; 0] = [];
let mut bytes: [u8; 2] = [0; 2];
port_recv(&self.port_cap, &mut bytes, &mut caps)?;
Ok(u16::from_le_bytes(bytes))
}
pub fn recv_null(&self) -> Result<(), ZError> {
let mut caps: [z_cap_t; 0] = [];
let mut bytes: [u8; 0] = [];
port_recv(&self.port_cap, &mut bytes, &mut caps)?;
Ok(())
}
}
pub struct PortClient {
port_cap: Capability,
}
impl PortClient {
pub fn take_from(port_cap: Capability) -> Self {
Self { port_cap }
}
#[warn(unused_results)]
pub fn write_u64_and_cap(&self, bytes: u64, cap: Capability) -> Result<(), ZError> {
let mut caps: [z_cap_t; 1] = [cap.release()];
crate::syscall::port_send(&self.port_cap, &bytes.to_le_bytes(), &mut caps)
}
}
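A small sketch (hypothetical, not part of this change set) of the intended split between PortServer and the write-only capability it mints:
// Create a port, mint a send-only capability for a peer, and block until
// the peer writes a single byte.
fn wait_for_ack() -> Result<u8, mammoth::zion::ZError> {
    let server = mammoth::port::PortServer::new()?;
    // create_client_cap strips the read permission, so the holder can only send.
    let client_cap = server.create_client_cap()?;
    // ... hand `client_cap` to the peer via an endpoint or the init port ...
    let _ = client_cap;
    server.recv_byte()
}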

View file

@ -1,81 +0,0 @@
use core::cell::UnsafeCell;
use core::ops::Deref;
use core::ops::DerefMut;
use crate::{cap::Capability, syscall, zion::ZError};
pub struct Semaphore {
cap: Capability,
}
impl Semaphore {
pub fn new() -> Result<Self, ZError> {
syscall::semaphore_create().map(|cap| Self { cap })
}
pub fn wait(&self) -> Result<(), ZError> {
syscall::semaphone_wait(&self.cap)
}
pub fn signal(&self) -> Result<(), ZError> {
syscall::semaphone_signal(&self.cap)
}
}
pub struct Mutex<T> {
cap: Capability,
data: UnsafeCell<T>,
}
unsafe impl<T> Sync for Mutex<T> where T: Send {}
pub struct MutexGuard<'a, T> {
mutex: &'a Mutex<T>,
}
unsafe impl<T> Send for MutexGuard<'_, T> where T: Send {}
unsafe impl<T> Sync for MutexGuard<'_, T> where T: Sync {}
impl<T> Deref for MutexGuard<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
unsafe { &*self.mutex.data.get() }
}
}
impl<T> DerefMut for MutexGuard<'_, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.mutex.data.get() }
}
}
impl<'a, T> Mutex<T> {
pub fn new(data: T) -> Mutex<T> {
Mutex {
cap: syscall::mutex_create().unwrap(),
data: UnsafeCell::new(data),
}
}
pub fn lock(&'a self) -> MutexGuard<'a, T> {
syscall::mutex_lock(&self.cap).unwrap();
MutexGuard { mutex: self }
}
}
impl<T> Drop for MutexGuard<'_, T> {
fn drop(&mut self) {
syscall::mutex_release(&self.mutex.cap).unwrap();
}
}
impl<T> Default for Mutex<T>
where
T: Default,
{
fn default() -> Self {
Self::new(T::default())
}
}
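A minimal sketch (hypothetical, not part of this change set) of the lock/guard flow; dropping the guard is what releases the kernel mutex:
fn guarded_increment(counter: &mammoth::sync::Mutex<u64>) -> u64 {
    // lock() blocks on the kernel mutex; MutexGuard::drop releases it.
    let mut guard = counter.lock();
    *guard += 1;
    *guard
}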

View file

@ -1,473 +0,0 @@
extern crate alloc;
use crate::cap::Capability;
use crate::zion;
use crate::zion::z_cap_t;
use crate::zion::ZError;
use core::ffi::c_void;
#[cfg(feature = "hosted")]
use core::panic::PanicInfo;
fn syscall<T>(id: u64, req: &T) -> Result<(), ZError> {
unsafe {
let resp = zion::SysCall1(id, req as *const T as *const c_void);
if resp != 0 {
return Err(zion::ZError::from(resp));
}
}
Ok(())
}
#[cfg(feature = "hosted")]
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
unsafe {
if crate::mem::CAN_ALLOC {
crate::debug!("Panic occured: {}", info);
} else {
debug("Panic occured before heap initialized.")
}
}
// Internal error.
let req = zion::ZProcessExitReq { code: 0x100 };
let _ = syscall(zion::kZionProcessExit, &req);
unreachable!()
}
pub fn debug(msg: &str) {
let req = zion::ZDebugReq {
message: msg.as_ptr() as *const i8,
size: msg.len() as u64,
};
syscall(zion::kZionDebug, &req).expect("Failed to write");
}
pub fn process_spawn(
proc_cap: &Capability,
bootstrap_cap: Capability,
) -> Result<(Capability, Capability, u64), ZError> {
let mut new_proc_cap = 0;
let mut new_as_cap = 0;
let mut new_bootstrap_cap = 0;
syscall(
zion::kZionProcessSpawn,
&zion::ZProcessSpawnReq {
proc_cap: proc_cap.raw(),
bootstrap_cap: bootstrap_cap.release(),
new_proc_cap: &mut new_proc_cap,
new_vmas_cap: &mut new_as_cap,
new_bootstrap_cap: &mut new_bootstrap_cap,
},
)?;
Ok((
Capability::take(new_proc_cap),
Capability::take(new_as_cap),
new_bootstrap_cap,
))
}
pub fn process_exit(code: u64) -> ! {
let _ = syscall(zion::kZionProcessExit, &zion::ZProcessExitReq { code });
unreachable!()
}
pub fn process_wait(proc_cap: &Capability) -> Result<u64, ZError> {
let mut err_code = 0;
syscall(
zion::kZionProcessWait,
&zion::ZProcessWaitReq {
proc_cap: proc_cap.raw(),
exit_code: &mut err_code,
},
)?;
Ok(err_code)
}
pub fn thread_create(proc_cap: &Capability) -> Result<Capability, ZError> {
let mut cap = 0;
syscall(
zion::kZionThreadCreate,
&zion::ZThreadCreateReq {
proc_cap: proc_cap.raw(),
thread_cap: &mut cap,
},
)?;
Ok(Capability::take(cap))
}
pub fn thread_sleep(millis: u64) -> Result<(), ZError> {
syscall(zion::kZionThreadSleep, &zion::ZThreadSleepReq { millis })
}
pub fn thread_start(
thread_cap: &Capability,
entry: u64,
arg1: u64,
arg2: u64,
) -> Result<(), ZError> {
syscall(
zion::kZionThreadStart,
&zion::ZThreadStartReq {
thread_cap: thread_cap.raw(),
entry,
arg1,
arg2,
},
)
}
pub fn thread_wait(thread_cap: &Capability) -> Result<(), ZError> {
syscall(
zion::kZionThreadWait,
&zion::ZThreadWaitReq {
thread_cap: thread_cap.raw(),
},
)
}
pub fn thread_exit() -> ! {
let _ = syscall(zion::kZionThreadExit, &zion::ZThreadExitReq {});
unreachable!();
}
pub fn memory_object_create(size: u64) -> Result<Capability, ZError> {
let mut vmmo_cap = 0;
let obj_req = zion::ZMemoryObjectCreateReq {
size,
vmmo_cap: &mut vmmo_cap as *mut u64,
};
syscall(zion::kZionMemoryObjectCreate, &obj_req)?;
Ok(Capability::take(vmmo_cap))
}
pub fn memory_object_direct_physical(paddr: u64, size: u64) -> Result<Capability, ZError> {
let mut vmmo_cap = 0;
syscall(
zion::kZionMemoryObjectCreatePhysical,
&zion::ZMemoryObjectCreatePhysicalReq {
paddr,
size,
vmmo_cap: &mut vmmo_cap,
},
)?;
Ok(Capability::take(vmmo_cap))
}
pub fn memory_object_contiguous_physical(size: u64) -> Result<(Capability, u64), ZError> {
let mut vmmo_cap = 0;
let mut paddr = 0;
syscall(
zion::kZionMemoryObjectCreateContiguous,
&zion::ZMemoryObjectCreateContiguousReq {
size,
paddr: &mut paddr,
vmmo_cap: &mut vmmo_cap,
},
)?;
Ok((Capability::take(vmmo_cap), paddr))
}
pub fn memory_object_inspect(mem_cap: &Capability) -> Result<u64, ZError> {
let mut mem_size = 0;
syscall(
zion::kZionMemoryObjectInspect,
&zion::ZMemoryObjectInspectReq {
vmmo_cap: mem_cap.raw(),
size: &mut mem_size,
},
)?;
Ok(mem_size)
}
pub fn memory_obj_duplicate(
mem_cap: &Capability,
base_offset: u64,
length: u64,
) -> Result<Capability, ZError> {
let mut new_cap = 0;
syscall(
zion::kZionMemoryObjectDuplicate,
&zion::ZMemoryObjectDuplicateReq {
vmmo_cap: mem_cap.raw(),
base_offset,
length,
new_vmmo_cap: &mut new_cap,
},
)?;
Ok(Capability::take(new_cap))
}
pub fn address_space_map(vmmo_cap: &Capability) -> Result<u64, ZError> {
let mut vaddr: u64 = 0;
// FIXME: Allow caller to pass these options.
let vmas_req = zion::ZAddressSpaceMapReq {
vmmo_cap: vmmo_cap.raw(),
vmas_cap: unsafe { crate::init::SELF_VMAS_CAP },
align: 0x2000,
vaddr: &mut vaddr as *mut u64,
vmas_offset: 0,
};
syscall(zion::kZionAddressSpaceMap, &vmas_req)?;
Ok(vaddr)
}
pub fn address_space_map_external(
vmas_cap: &Capability,
vmmo_cap: &Capability,
vaddr: u64,
) -> Result<(), ZError> {
let mut vaddr_throw: u64 = 0;
let vmas_req = zion::ZAddressSpaceMapReq {
vmas_cap: vmas_cap.raw(),
vmmo_cap: vmmo_cap.raw(),
align: 0,
vaddr: &mut vaddr_throw as *mut u64,
vmas_offset: vaddr,
};
syscall(zion::kZionAddressSpaceMap, &vmas_req)?;
Ok(())
}
pub fn address_space_unmap(lower_addr: u64, upper_addr: u64) -> Result<(), ZError> {
syscall(
zion::kZionAddressSpaceUnmap,
&zion::ZAddressSpaceUnmapReq {
vmas_cap: unsafe { crate::init::SELF_VMAS_CAP },
lower_addr,
upper_addr,
},
)
}
pub fn port_create() -> Result<Capability, ZError> {
let mut port_cap = 0;
syscall(
zion::kZionPortCreate,
&zion::ZPortCreateReq {
port_cap: &mut port_cap,
},
)?;
Ok(Capability::take(port_cap))
}
pub fn port_send(port_cap: &Capability, bytes: &[u8], caps: &mut [z_cap_t]) -> Result<(), ZError> {
syscall(
zion::kZionPortSend,
&zion::ZPortSendReq {
port_cap: port_cap.raw(),
num_bytes: bytes.len() as u64,
data: bytes.as_ptr() as *const c_void,
num_caps: caps.len() as u64,
// FIXME: This shouldn't need to be mutable.
caps: caps.as_mut_ptr(),
},
)
}
pub fn port_recv(
port_cap: &Capability,
bytes: &mut [u8],
caps: &mut [u64],
) -> Result<(u64, u64), ZError> {
let mut num_bytes = bytes.len() as u64;
let mut num_caps = caps.len() as u64;
syscall(
zion::kZionPortRecv,
&zion::ZPortRecvReq {
port_cap: port_cap.raw(),
data: bytes.as_mut_ptr() as *mut c_void,
num_bytes: &mut num_bytes as *mut u64,
caps: caps.as_mut_ptr(),
num_caps: &mut num_caps as *mut u64,
},
)?;
Ok((num_bytes, num_caps))
}
pub fn port_poll(
port_cap: &Capability,
bytes: &mut [u8],
caps: &mut [u64],
) -> Result<(u64, u64), ZError> {
let mut num_bytes = bytes.len() as u64;
let mut num_caps = caps.len() as u64;
let req = zion::ZPortPollReq {
port_cap: port_cap.raw(),
data: bytes.as_mut_ptr() as *mut c_void,
num_bytes: &mut num_bytes as *mut u64,
caps: caps.as_mut_ptr(),
num_caps: &mut num_caps as *mut u64,
};
syscall(zion::kZionPortPoll, &req)?;
Ok((num_bytes, num_caps))
}
pub fn register_msi_irq() -> Result<(Capability, u64), ZError> {
let mut irq_num: u64 = 0;
let mut port_cap: z_cap_t = 0;
syscall(
zion::kZionMsiIrqRegister,
&zion::ZMsiIrqRegisterReq {
irq_num: &mut irq_num as *mut u64,
port_cap: &mut port_cap,
},
)?;
Ok((Capability::take(port_cap), irq_num))
}
pub fn endpoint_create() -> Result<Capability, ZError> {
let mut endpoint_cap: z_cap_t = 0;
syscall(
zion::kZionEndpointCreate,
&zion::ZEndpointCreateReq {
endpoint_cap: &mut endpoint_cap,
},
)?;
Ok(Capability::take(endpoint_cap))
}
pub fn endpoint_send(
endpoint_cap: &Capability,
bytes: &[u8],
caps: &[z_cap_t],
) -> Result<Capability, ZError> {
let mut reply_port_cap: u64 = 0;
let send_req = zion::ZEndpointSendReq {
caps: caps.as_ptr(),
num_caps: caps.len() as u64,
endpoint_cap: endpoint_cap.raw(),
data: bytes.as_ptr() as *const c_void,
num_bytes: bytes.len() as u64,
reply_port_cap: &mut reply_port_cap,
};
syscall(zion::kZionEndpointSend, &send_req)?;
Ok(Capability::take(reply_port_cap))
}
pub fn endpoint_recv(
endpoint_cap: &Capability,
bytes: &mut [u8],
caps: &mut [z_cap_t],
) -> Result<(u64, u64, Capability), ZError> {
let mut num_bytes = bytes.len() as u64;
let mut num_caps = caps.len() as u64;
let mut reply_port_cap = 0;
let recv_req = zion::ZEndpointRecvReq {
endpoint_cap: endpoint_cap.raw(),
data: bytes.as_mut_ptr() as *mut c_void,
num_bytes: &mut num_bytes,
caps: caps.as_mut_ptr(),
num_caps: &mut num_caps,
reply_port_cap: &mut reply_port_cap,
};
syscall(zion::kZionEndpointRecv, &recv_req)?;
Ok((num_bytes, num_caps, Capability::take(reply_port_cap)))
}
pub fn reply_port_send(
reply_port_cap: Capability,
bytes: &[u8],
caps: &[z_cap_t],
) -> Result<(), ZError> {
syscall(
zion::kZionReplyPortSend,
&zion::ZReplyPortSendReq {
reply_port_cap: reply_port_cap.raw(),
data: bytes.as_ptr() as *const c_void,
num_bytes: bytes.len() as u64,
caps: caps.as_ptr(),
num_caps: caps.len() as u64,
},
)
}
pub fn reply_port_recv(
reply_port_cap: Capability,
bytes: &mut [u8],
caps: &mut [z_cap_t],
) -> Result<(u64, u64), ZError> {
let mut num_bytes = bytes.len() as u64;
let mut num_caps = caps.len() as u64;
let recv_req = zion::ZReplyPortRecvReq {
reply_port_cap: reply_port_cap.raw(),
caps: caps.as_mut_ptr(),
num_caps: &mut num_caps,
data: bytes.as_mut_ptr() as *mut c_void,
num_bytes: &mut num_bytes,
};
syscall(zion::kZionReplyPortRecv, &recv_req)?;
Ok((num_bytes, num_caps))
}
pub fn mutex_create() -> Result<Capability, ZError> {
let mut mutex_cap: z_cap_t = 0;
syscall(
zion::kZionMutexCreate,
&zion::ZMutexCreateReq {
mutex_cap: &mut mutex_cap,
},
)?;
Ok(Capability::take(mutex_cap))
}
pub fn mutex_lock(mutex_cap: &Capability) -> Result<(), ZError> {
syscall(
zion::kZionMutexLock,
&zion::ZMutexLockReq {
mutex_cap: mutex_cap.raw(),
},
)
}
pub fn mutex_release(mutex_cap: &Capability) -> Result<(), ZError> {
syscall(
zion::kZionMutexRelease,
&zion::ZMutexReleaseReq {
mutex_cap: mutex_cap.raw(),
},
)
}
pub fn semaphore_create() -> Result<Capability, ZError> {
let mut sem_cap: z_cap_t = 0;
syscall(
zion::kZionSemaphoreCreate,
&zion::ZSemaphoreCreateReq {
semaphore_cap: &mut sem_cap,
},
)?;
Ok(Capability::take(sem_cap))
}
pub fn semaphone_signal(sem_cap: &Capability) -> Result<(), ZError> {
syscall(
zion::kZionSemaphoreSignal,
&zion::ZSemaphoreSignalReq {
semaphore_cap: sem_cap.raw(),
},
)
}
pub fn semaphone_wait(sem_cap: &Capability) -> Result<(), ZError> {
syscall(
zion::kZionSemaphoreWait,
&zion::ZSemaphoreWaitReq {
semaphore_cap: sem_cap.raw(),
},
)
}

View file

@ -1,145 +0,0 @@
use core::{
future::Future,
pin::Pin,
sync::atomic::{AtomicU64, Ordering},
task::{Context, Poll, Waker},
};
use alloc::{
boxed::Box,
collections::{BTreeMap, VecDeque},
sync::Arc,
task::Wake,
};
use crate::{sync::Mutex, syscall};
#[derive(PartialEq, Eq, PartialOrd, Ord, Copy, Clone)]
struct TaskId(u64);
impl TaskId {
pub fn new() -> TaskId {
static NEXT_ID: AtomicU64 = AtomicU64::new(0);
TaskId(NEXT_ID.fetch_add(1, Ordering::Relaxed))
}
}
pub struct Task {
id: TaskId,
future: Pin<Box<dyn Future<Output = ()> + Send>>,
}
impl Task {
pub fn new(future: impl Future<Output = ()> + Sync + Send + 'static) -> Task {
Task {
id: TaskId::new(),
future: Box::pin(future),
}
}
pub fn poll(&mut self, context: &mut Context) -> Poll<()> {
self.future.as_mut().poll(context)
}
}
struct TaskWaker {
task_id: TaskId,
task_queue: Arc<Mutex<VecDeque<TaskId>>>,
}
impl TaskWaker {
fn create_waker(task_id: TaskId, task_queue: Arc<Mutex<VecDeque<TaskId>>>) -> Waker {
Waker::from(Arc::new(TaskWaker {
task_id,
task_queue,
}))
}
fn wake_task(&self) {
self.task_queue.lock().push_back(self.task_id);
}
}
impl Wake for TaskWaker {
fn wake(self: Arc<Self>) {
self.wake_task();
}
fn wake_by_ref(self: &Arc<Self>) {
self.wake_task();
}
}
#[derive(Default)]
pub struct Executor {
tasks: Arc<Mutex<BTreeMap<TaskId, Task>>>,
// TODO: Consider a better datastructure for this.
task_queue: Arc<Mutex<VecDeque<TaskId>>>,
waker_cache: BTreeMap<TaskId, Waker>,
}
impl Executor {
pub fn new() -> Executor {
Executor::default()
}
pub fn spawn(&mut self, task: Task) {
let task_id = task.id;
if self.tasks.lock().insert(task_id, task).is_some() {
panic!("Task is already existed in executor map");
}
self.task_queue.lock().push_back(task_id);
}
fn run_ready_tasks(&mut self) {
while let Some(task_id) = self.task_queue.lock().pop_front() {
let mut tasks = self.tasks.lock();
let task = tasks.get_mut(&task_id).unwrap();
let waker = self
.waker_cache
.entry(task_id)
.or_insert_with(|| TaskWaker::create_waker(task_id, self.task_queue.clone()));
let mut ctx = Context::from_waker(waker);
match task.poll(&mut ctx) {
Poll::Ready(()) => {
tasks.remove(&task_id);
self.waker_cache.remove(&task_id);
}
Poll::Pending => {}
};
}
}
pub fn run(&mut self) {
loop {
self.run_ready_tasks();
// TODO: We need some sort of semaphore wait here.
syscall::thread_sleep(10).unwrap();
}
}
pub fn new_spawner(&self) -> Spawner {
Spawner::new(self.tasks.clone(), self.task_queue.clone())
}
}
pub struct Spawner {
tasks: Arc<Mutex<BTreeMap<TaskId, Task>>>,
task_queue: Arc<Mutex<VecDeque<TaskId>>>,
}
impl Spawner {
fn new(
tasks: Arc<Mutex<BTreeMap<TaskId, Task>>>,
task_queue: Arc<Mutex<VecDeque<TaskId>>>,
) -> Self {
Spawner { tasks, task_queue }
}
pub fn spawn(&self, task: Task) {
let task_id = task.id;
if self.tasks.lock().insert(task_id, task).is_some() {
panic!("Task is already existed in executor map");
}
self.task_queue.lock().push_back(task_id);
}
}
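A small sketch (hypothetical, not part of this change set) of driving a task through the executor; note that run() never returns:
fn run_hello_task() {
    let mut executor = mammoth::task::Executor::new();
    executor.spawn(mammoth::task::Task::new(async {
        mammoth::syscall::debug("hello from an async task");
    }));
    // Polls ready tasks forever, sleeping 10ms between passes; never returns.
    executor.run();
}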

View file

@ -1,46 +0,0 @@
use crate::cap::Capability;
use crate::syscall;
use crate::zion;
use alloc::boxed::Box;
use core::ffi::c_void;
pub struct JoinHandle {
cap: Capability,
}
impl JoinHandle {
pub fn join(&self) -> Result<(), zion::ZError> {
syscall::thread_wait(&self.cap)
}
}
#[no_mangle]
extern "C" fn entry_point(func: *mut c_void) -> ! {
unsafe {
Box::from_raw(func as *mut Box<dyn FnOnce()>)();
}
syscall::thread_exit()
}
pub fn spawn<F>(f: F) -> JoinHandle
where
F: FnOnce() + Send + 'static,
{
// This is very tricky.
// If we have been passed a closure that doesn't capture
// anything it will be 0 size and creating a Box of it
// will create a pointer with address 0x1.
// So we create this "main" closure that captures f to get around this.
// Also somehow having the explicit type annotation here is important.
let main: Box<dyn FnOnce()> = Box::new(move || {
f();
});
let raw_main = Box::into_raw(Box::new(main));
let proc_cap = Capability::take_copy(unsafe { crate::init::SELF_PROC_CAP }).unwrap();
let cap = syscall::thread_create(&proc_cap).unwrap();
syscall::thread_start(&cap, entry_point as usize as u64, raw_main as u64, 0).unwrap();
JoinHandle { cap }
}
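A minimal sketch (hypothetical, not part of this change set) of spawning and joining a thread with the API above:
fn spawn_and_join() {
    let handle = mammoth::thread::spawn(|| {
        mammoth::syscall::debug("work happens on a second thread");
    });
    // join blocks until the thread calls thread_exit (done by entry_point).
    handle.join().expect("worker thread failed");
}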

View file

@ -1,75 +0,0 @@
include!("bindings.rs");
use core::fmt;
pub enum ZError {
UNKNOWN = 0x0,
// First set of error codes generally indicate user errors.
INVALID_ARGUMENT = 0x1,
NOT_FOUND = 0x2,
PERMISSION_DENIED = 0x3,
NULL_PTR = 0x4,
EMPTY = 0x5,
ALREADY_EXISTS = 0x6,
BUFFER_SIZE = 0x7,
FAILED_PRECONDITION = 0x8,
// Second set of error codes generally indicate service errors.
INTERNAL = 0x100,
UNIMPLEMENTED = 0x101,
EXHAUSTED = 0x102,
INVALID_RESPONSE = 0x103,
// Kernel specific error codes (relating to capabilities).
CAP_NOT_FOUND = 0x1000,
CAP_WRONG_TYPE = 0x1001,
CAP_PERMISSION_DENIED = 0x1002,
}
impl From<u64> for ZError {
fn from(value: u64) -> Self {
match value {
0x1 => ZError::INVALID_ARGUMENT,
0x2 => ZError::NOT_FOUND,
0x3 => ZError::PERMISSION_DENIED,
0x4 => ZError::NULL_PTR,
0x5 => ZError::EMPTY,
0x6 => ZError::ALREADY_EXISTS,
0x7 => ZError::BUFFER_SIZE,
0x8 => ZError::FAILED_PRECONDITION,
0x100 => ZError::INTERNAL,
0x101 => ZError::UNIMPLEMENTED,
0x102 => ZError::EXHAUSTED,
0x103 => ZError::INVALID_RESPONSE,
0x1000 => ZError::CAP_NOT_FOUND,
0x1001 => ZError::CAP_WRONG_TYPE,
0x1002 => ZError::CAP_PERMISSION_DENIED,
_ => ZError::UNKNOWN,
}
}
}
impl fmt::Debug for ZError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let str = match self {
ZError::INVALID_ARGUMENT => "INVALID_ARGUMENT",
ZError::NOT_FOUND => "NOT_FOUND",
ZError::PERMISSION_DENIED => "PERMISSION_DENIED",
ZError::NULL_PTR => "NULL_PTR",
ZError::EMPTY => "EMPTY",
ZError::ALREADY_EXISTS => "ALREADY_EXISTS",
ZError::BUFFER_SIZE => "BUFFER_SIZE",
ZError::FAILED_PRECONDITION => "FAILED_PRECONDITION",
ZError::INTERNAL => "INTERNAL",
ZError::UNIMPLEMENTED => "UNIMPLEMENTED",
ZError::INVALID_RESPONSE => "INVALID_RESPONSE",
ZError::CAP_NOT_FOUND => "CAP_NOT_FOUND",
ZError::CAP_WRONG_TYPE => "CAP_WRONG_TYPE",
ZError::CAP_PERMISSION_DENIED => "CAP_PERMISSION_DENIED",
_ => "ZError",
};
f.write_str(str)
}
}

View file

@ -1,8 +0,0 @@
[package]
name = "pci"
version = "0.1.0"
edition = "2024"
[dependencies]
bitfield-struct = "0.8.0"
mammoth = {path = "../mammoth/"}

View file

@ -1,97 +0,0 @@
use alloc::vec::Vec;
use mammoth::{cap::Capability, mem::MemoryRegion, syscall, zion::ZError};
use crate::header::{
PciCapabilityPointer, PciDeviceHeader, PciHeaderType, PciMsiCapability, get_header_type,
};
pub struct PciDevice {
memory_region: MemoryRegion,
}
impl PciDevice {
pub fn from(memory_region: MemoryRegion) -> Result<Self, ZError> {
match get_header_type(&memory_region)? {
PciHeaderType::Device => {}
t => {
mammoth::debug!("Invalid header type: {:?}", t);
return Err(ZError::INVALID_ARGUMENT);
}
}
Ok(Self { memory_region })
}
pub fn from_cap(capability: Capability) -> Result<Self, ZError> {
Self::from(MemoryRegion::from_cap(capability)?)
}
pub fn header(&self) -> &PciDeviceHeader {
self.memory_region.as_ref()
}
pub fn get_capability_list(&self) -> Result<Vec<&PciCapabilityPointer>, ZError> {
let status = self.header().status;
if !status.capability_list() {
return Err(ZError::NOT_FOUND);
}
let mut cap_offset = self.header().capability_ptr;
let mut cap_vec = Vec::new();
while cap_offset != 0 {
let cap_ptr: &PciCapabilityPointer = unsafe {
self.memory_region
.raw_ptr_at_offset::<PciCapabilityPointer>(cap_offset as u64)
.as_ref()
.unwrap()
};
cap_vec.push(cap_ptr);
cap_offset = cap_ptr.next_cap_offset;
}
Ok(cap_vec)
}
pub fn register_msi(&mut self) -> Result<Capability, ZError> {
let caps = self.get_capability_list()?;
const MSI_CAP_ID: u8 = 0x05;
let msi_cap: &PciCapabilityPointer = caps
.iter()
.find(|cp| cp.cap_id == MSI_CAP_ID)
.ok_or(ZError::NOT_FOUND)?;
let msi_cap = unsafe {
((msi_cap as *const PciCapabilityPointer) as *mut PciMsiCapability)
.as_mut()
.unwrap()
};
let control = msi_cap.msi_control;
assert!(
control.capable_address_64(),
"We don't handle the non-64bit case for MSI yet."
);
assert!(
control.multi_message_capable() == 0,
"We don't yet handle multi-message capable devices."
);
// FIXME: These probably need to be volatile writes.
let header: &mut PciDeviceHeader = self.memory_region.as_mut();
header.command = header.command.with_interrupt_disable(true);
msi_cap.msi_control = control.with_msi_enable(true);
// For setting addr and data field, see intel ref
// Vol 3. Section 11.11
// TODO: This is hardcoded to APIC 0 currently.
msi_cap.msi_addr_lower = 0xFEE00000;
msi_cap.msi_addr_upper_or_data = 0x0;
let (cap, irq_num) = syscall::register_msi_irq()?;
// TODO: Do we need to set the specific level triggering options for this?
msi_cap.msi_data_if_64 = irq_num as u32;
Ok(cap)
}
}
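A short caller sketch (hypothetical, not part of this change set); the configuration-space capability would normally come from yellowstone or the boot capabilities:
fn enable_msi(
    config_space_cap: mammoth::cap::Capability,
) -> Result<mammoth::cap::Capability, mammoth::zion::ZError> {
    let mut device = pci::PciDevice::from_cap(config_space_cap)?;
    // register_msi disables legacy INTx, enables MSI pointed at APIC 0, and
    // returns the IRQ port capability; each interrupt arrives as one message.
    device.register_msi()
}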

View file

@ -1,180 +0,0 @@
use bitfield_struct::bitfield;
use mammoth::{mem::MemoryRegion, zion::ZError};
#[bitfield(u16)]
pub struct PciCommand {
io_space_enable: bool,
memory_space_enable: bool,
bus_master_enable: bool,
#[bits(access=RO)]
special_cycles_enable: bool,
#[bits(access=RO)]
memory_write_and_invalidate_enable: bool,
#[bits(access=RO)]
vga_palette_snoop_enable: bool,
parity_error_response_enable: bool,
#[bits(access=RO)]
wait_cycle_enable: bool,
serr_enable: bool,
fast_back_to_back_enable: bool,
/// Polarity is reversed here: set to true to disable.
/// Does not affect MSI.
pub interrupt_disable: bool,
#[bits(5)]
__: u8,
}
#[bitfield(u16)]
pub struct PciStatus {
#[bits(3)]
__: u8,
#[bits(access=RO)]
pub interrupt_status: bool,
#[bits(access=RO)]
pub capability_list: bool,
#[bits(access=RO)]
pub capable_of_66mhz: bool,
___: bool,
#[bits(access=RO)]
pub fast_back_to_back_capable: bool,
/// Write 1 to clear
pub master_data_parity_error: bool,
#[bits(2, access=RO)]
pub devsel_timing: u8,
/// Write 1 to clear
pub signaled_target_abort: bool,
/// Write 1 to clear
pub received_target_abort: bool,
/// Write 1 to clear
pub received_master_abort: bool,
/// Write 1 to clear
pub signaled_system_error: bool,
/// Write 1 to clear
pub detected_parity_error: bool,
}
/// Header definitions from https://wiki.osdev.org/PCI
#[repr(C, packed)]
#[derive(Debug)]
pub struct HeaderShared {
pub vendor_id: u16,
pub device_id: u16,
pub command: PciCommand,
pub status: PciStatus,
pub revision_id: u8,
pub prog_if: u8,
pub subclass: u8,
pub class_code: u8,
pub cache_line_size: u8,
pub latency_timer: u8,
pub header_type: u8,
bist: u8,
}
const _: () = assert!(size_of::<HeaderShared>() == 16);
#[repr(C, packed)]
#[derive(Debug)]
pub struct PciDeviceHeader {
pub vendor_id: u16,
pub device_id: u16,
pub command: PciCommand,
pub status: PciStatus,
pub revision_id: u8,
pub prog_if: u8,
pub subclass: u8,
pub class_code: u8,
pub cache_line_size: u8,
pub latency_timer: u8,
pub header_type: u8,
bist: u8,
pub bars: [u32; 6],
pub cardbus_cis_ptr: u32,
pub subsystem_vendor_id: u16,
pub subsystem_id: u16,
pub expansion_rom_address: u32,
pub capability_ptr: u8,
__: [u8; 7],
pub interrupt_line: u8,
pub interrupt_pin: u8,
pub min_grant: u8,
pub max_latency: u8,
}
const _: () = assert!(size_of::<PciDeviceHeader>() == 0x40);
#[repr(C, packed)]
#[derive(Debug)]
pub struct PciCapabilityPointer {
pub cap_id: u8,
pub next_cap_offset: u8,
}
#[bitfield(u16)]
pub struct PciMsiControl {
pub msi_enable: bool,
#[bits(3, access=RO)]
pub multi_message_capable: u8,
#[bits(3)]
pub multi_message_enable: u8,
#[bits(access=RO)]
pub capable_address_64: bool,
#[bits(access=RO)]
pub per_vector_masking: bool,
#[bits(7)]
__: u8,
}
#[repr(C, packed)]
#[derive(Debug)]
pub struct PciMsiCapability {
pub cap_id: u8,
pub next_cap_offset: u8,
pub msi_control: PciMsiControl,
pub msi_addr_lower: u32,
pub msi_addr_upper_or_data: u32,
pub msi_data_if_64: u32,
pub mask: u32,
pub pending: u32,
}
#[derive(Debug)]
pub enum PciHeaderType {
Device,
PciBridge,
CardBusBridge,
}
pub fn get_header_type(memory_region: &MemoryRegion) -> Result<PciHeaderType, ZError> {
let shared: &HeaderShared = memory_region.as_ref();
// The only reference I can find to the high bit here is at
// https://www.khoury.northeastern.edu/~pjd/cs7680/homework/pci-enumeration.html
// > Header Type: bit 7 (0x80) indicates whether it is a multi-function device,
match shared.header_type & (!0x80) {
0x0 => Ok(PciHeaderType::Device),
0x1 => Ok(PciHeaderType::PciBridge),
0x2 => Ok(PciHeaderType::CardBusBridge),
_ => {
mammoth::debug!("Unknown pci header type: {:#x}", shared.header_type);
Err(ZError::INVALID_ARGUMENT)
}
}
}

View file

@ -1,8 +0,0 @@
#![no_std]
extern crate alloc;
mod device;
mod header;
pub use device::PciDevice;

View file

@ -1,13 +0,0 @@
[package]
name = "voyageurs"
version = "0.1.0"
edition = "2021"
[dependencies]
mammoth = { path = "../mammoth" }
yellowstone-yunq = { path = "../yellowstone" }
yunq = {path = "../yunq"}
[build-dependencies]
yunqc = {path = "../../../yunq/rust"}

View file

@ -1,14 +0,0 @@
use std::fs;
fn main() {
let input_file = "../../../sys/voyageurs/lib/voyageurs/voyageurs.yunq";
println!("cargo::rerun-if-changed={input_file}");
let input = fs::read_to_string(input_file).expect("Failed to read input file");
let code = yunqc::codegen(&input).expect("Failed to generate yunq code.");
let out = std::env::var("OUT_DIR").unwrap() + "/yunq.rs";
fs::write(out, code).expect("Failed to write generated code.");
}

View file

@ -1,8 +0,0 @@
#![no_std]
#![feature(box_into_inner)]
use core::include;
include!(concat!(env!("OUT_DIR"), "/yunq.rs"));
pub mod listener;

View file

@ -1,226 +0,0 @@
use alloc::string::ToString;
use mammoth::cap::Capability;
use mammoth::port::PortServer;
use mammoth::thread;
#[repr(u8)]
#[allow(dead_code)]
#[derive(PartialEq, Eq, PartialOrd, Ord, Copy, Clone)]
enum Keycode {
Unknown = 0x0,
A = 0x1,
B = 0x2,
C = 0x3,
D = 0x4,
E = 0x5,
F = 0x6,
G = 0x7,
H = 0x8,
I = 0x9,
J = 0xA,
K = 0xB,
L = 0xC,
M = 0xD,
N = 0xE,
O = 0xF,
P = 0x10,
Q = 0x11,
R = 0x12,
S = 0x13,
T = 0x14,
U = 0x15,
V = 0x16,
W = 0x17,
X = 0x18,
Y = 0x19,
Z = 0x1A,
NUM1 = 0x20,
NUM2 = 0x21,
NUM3 = 0x22,
NUM4 = 0x23,
NUM5 = 0x24,
NUM6 = 0x25,
NUM7 = 0x26,
NUM8 = 0x27,
NUM9 = 0x28,
NUM0 = 0x29,
Space = 0x30,
Enter = 0x31,
Tab = 0x32,
Backspace = 0x33,
Delete = 0x34,
Minus = 0x40,
Equals = 0x41,
LBrace = 0x42,
RBrace = 0x43,
BSlash = 0x44,
FSlash = 0x45,
Semicolon = 0x46,
Quote = 0x47,
Comma = 0x48,
Period = 0x49,
Backtick = 0x4A,
LShift = 0x50,
RShift = 0x51,
LCtrl = 0x52,
RCtrl = 0x53,
LAlt = 0x54,
RAlt = 0x55,
Super = 0x56,
Esc = 0x57,
Up = 0x58,
Down = 0x59,
Left = 0x5A,
Right = 0x5B,
}
impl Keycode {
fn from_scancode(scancode: u16) -> Self {
match scancode as u8 {
0x04 => Keycode::A,
0x05 => Keycode::B,
0x06 => Keycode::C,
0x07 => Keycode::D,
0x08 => Keycode::E,
0x09 => Keycode::F,
0x0A => Keycode::G,
0x0B => Keycode::H,
0x0C => Keycode::I,
0x0D => Keycode::J,
0x0E => Keycode::K,
0x0F => Keycode::L,
0x10 => Keycode::M,
0x11 => Keycode::N,
0x12 => Keycode::O,
0x13 => Keycode::P,
0x14 => Keycode::Q,
0x15 => Keycode::R,
0x16 => Keycode::S,
0x17 => Keycode::T,
0x18 => Keycode::U,
0x19 => Keycode::V,
0x1A => Keycode::W,
0x1B => Keycode::X,
0x1C => Keycode::Y,
0x1D => Keycode::Z,
0x1E => Keycode::NUM1,
0x1F => Keycode::NUM2,
0x20 => Keycode::NUM3,
0x21 => Keycode::NUM4,
0x22 => Keycode::NUM5,
0x23 => Keycode::NUM6,
0x24 => Keycode::NUM7,
0x25 => Keycode::NUM8,
0x26 => Keycode::NUM9,
0x27 => Keycode::NUM0,
0x28 => Keycode::Enter,
0x29 => Keycode::Esc,
0x2A => Keycode::Backspace,
0x2B => Keycode::Tab,
0x2C => Keycode::Space,
0x2D => Keycode::Minus,
0x2E => Keycode::Equals,
0x2F => Keycode::LBrace,
0x30 => Keycode::RBrace,
0x31 => Keycode::BSlash,
0x33 => Keycode::Semicolon,
0x34 => Keycode::Quote,
0x35 => Keycode::Backtick,
0x36 => Keycode::Comma,
0x37 => Keycode::Period,
0x38 => Keycode::FSlash,
0x39 => Keycode::Esc,
_ => Keycode::Unknown,
}
}
}
struct Modifiers(u8);
impl Modifiers {
fn from_scancode(scancode: u16) -> Self {
Self((scancode >> 8) as u8)
}
fn is_shift(&self) -> bool {
((self.0 & 0x20) == 0x20) || ((self.0 & 0x2) == 0x2)
}
}
fn into_char(keycode: Keycode, modifiers: Modifiers) -> char {
match keycode {
k if (Keycode::A..=Keycode::Z).contains(&k) => {
if modifiers.is_shift() {
let chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
chars.as_bytes()[k as usize - Keycode::A as usize] as char
} else {
let chars = "abcdefghijklmnopqrstuvwxyz";
chars.as_bytes()[k as usize - Keycode::A as usize] as char
}
}
k if (Keycode::NUM1..=Keycode::NUM0).contains(&k) => {
if modifiers.is_shift() {
let chars = "!@#$%^&*()";
chars.as_bytes()[k as usize - Keycode::NUM1 as usize] as char
} else {
let chars = "12345687890";
chars.as_bytes()[k as usize - Keycode::NUM1 as usize] as char
}
}
k if (Keycode::Minus..=Keycode::Backtick).contains(&k) => {
if modifiers.is_shift() {
let chars = "_+{}|?:\"<>~";
chars.as_bytes()[k as usize - Keycode::Minus as usize] as char
} else {
let chars = "-=[]\\/;',.`";
chars.as_bytes()[k as usize - Keycode::Minus as usize] as char
}
}
Keycode::Enter => '\n',
Keycode::Space => ' ',
Keycode::Tab => '\t',
Keycode::Backspace => '\x08',
_ => '\0',
}
}
pub trait KeyboardHandler {
fn handle_char(&mut self, c: char);
}
pub fn spawn_keyboard_listener<T>(mut handler: T) -> thread::JoinHandle
where
T: KeyboardHandler + Send + 'static,
{
let listen_port = PortServer::new().unwrap();
let voyageur_endpoint = yellowstone_yunq::from_init_endpoint()
.get_endpoint(&yellowstone_yunq::GetEndpointRequest {
endpoint_name: "voyageurs".to_string(),
})
.unwrap()
.endpoint;
let mut voyageur_client = crate::VoyageursClient::new(Capability::take(voyageur_endpoint));
voyageur_client
.register_keyboard_listener(&crate::KeyboardListener {
port_capability: listen_port.create_client_cap().unwrap(),
})
.unwrap();
let listen_thread = move || loop {
let scancode = listen_port.recv_u16().expect("Failed to receive scancode");
let keycode = Keycode::from_scancode(scancode);
let modifiers = Modifiers::from_scancode(scancode);
handler.handle_char(into_char(keycode, modifiers))
};
thread::spawn(listen_thread)
}
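A small consumer sketch (hypothetical, not part of this change set); it assumes a crate root with extern crate alloc, which the formatted debug! macro needs:
// Echo every decoded character to the kernel debug log.
struct DebugEcho;

impl voyageurs::listener::KeyboardHandler for DebugEcho {
    fn handle_char(&mut self, c: char) {
        mammoth::debug!("key: {}", c);
    }
}

fn listen_for_keys() {
    let handle = voyageurs::listener::spawn_keyboard_listener(DebugEcho);
    handle.join().expect("keyboard listener exited");
}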

View file

@ -1,12 +0,0 @@
[package]
name = "yellowstone-yunq"
version = "0.1.0"
edition = "2021"
[dependencies]
mammoth = { path = "../mammoth" }
yunq = {path = "../yunq"}
[build-dependencies]
yunqc = {path = "../../../yunq/rust"}

View file

@ -1,14 +0,0 @@
use std::fs;
fn main() {
let input_file = "../../../sys/yellowstone/lib/yellowstone/yellowstone.yunq";
println!("cargo::rerun-if-changed={input_file}");
let input = fs::read_to_string(input_file).expect("Failed to read input file");
let code = yunqc::codegen(&input).expect("Failed to generate yunq code.");
let out = std::env::var("OUT_DIR").unwrap() + "/yunq.rs";
fs::write(out, code).expect("Failed to write generated code.");
}

View file

@ -1,21 +0,0 @@
#![no_std]
use core::include;
include!(concat!(env!("OUT_DIR"), "/yunq.rs"));
use mammoth::init::INIT_ENDPOINT;
static mut YELLOWSTONE_INIT: Option<YellowstoneClient> = None;
pub fn from_init_endpoint() -> &'static mut YellowstoneClient {
unsafe {
#[allow(static_mut_refs)]
if YELLOWSTONE_INIT.is_none() {
YELLOWSTONE_INIT = Some(YellowstoneClient::new(Capability::take(INIT_ENDPOINT)));
}
#[allow(static_mut_refs)]
YELLOWSTONE_INIT.as_mut().unwrap()
}
}
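A usage sketch (hypothetical, not part of this change set); GetEndpointRequest and the endpoint field come from the generated yunq bindings, and the caller crate is assumed to declare extern crate alloc:
use alloc::string::ToString;

fn find_service(name: &str) -> Result<mammoth::cap::Capability, mammoth::zion::ZError> {
    let yellowstone = yellowstone_yunq::from_init_endpoint();
    let resp = yellowstone.get_endpoint(&yellowstone_yunq::GetEndpointRequest {
        endpoint_name: name.to_string(),
    })?;
    Ok(mammoth::cap::Capability::take(resp.endpoint))
}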

View file

@ -1,12 +0,0 @@
[package]
name = "yunq-test"
version = "0.1.0"
edition = "2021"
[dependencies]
mammoth = { path = "../mammoth", default-features = false}
yunq = {path = "../yunq", default-features = false}
[build-dependencies]
yunqc = {path = "../../../yunq/rust"}

View file

@ -1,14 +0,0 @@
use std::fs;
fn main() {
let input_file = "../../../lib/yunq/test/example/example.yunq";
println!("cargo::rerun-if-changed={input_file}");
let input = fs::read_to_string(input_file).expect("Failed to read input file");
let code = yunqc::codegen(&input).expect("Failed to generate yunq code.");
let out = std::env::var("OUT_DIR").unwrap() + "/yunq.rs";
fs::write(out, code).expect("Failed to write generated code.");
}

View file

@ -1,140 +0,0 @@
#![no_std]
include!(concat!(env!("OUT_DIR"), "/yunq.rs"));
#[cfg(test)]
mod tests {
use super::*;
extern crate std;
use std::println;
use std::vec;
#[test]
fn basic_serialization() -> Result<(), ZError> {
let basic = Basic {
unsigned_int: 82,
signed_int: -1234,
strn: "abc".to_string(),
};
let mut buf = ByteBuffer::<1024>::new();
let mut caps = Vec::new();
basic.serialize(&mut buf, 0, &mut caps)?;
let parsed = Basic::parse(&buf, 0, &caps)?;
assert!(parsed == basic);
Ok(())
}
#[test]
fn basic_serialization_as_request() -> Result<(), ZError> {
let basic = Basic {
unsigned_int: 82,
signed_int: -1234,
strn: "abc".to_string(),
};
let mut buf = ByteBuffer::<1024>::new();
let mut caps = Vec::new();
let req_id = 12;
basic.serialize_as_request(req_id, &mut buf, &mut caps)?;
assert!(buf.at::<u64>(8)? == req_id);
let parsed = Basic::parse_from_request(&buf, &caps)?;
assert!(parsed == basic);
Ok(())
}
#[test]
fn capability_serialization() -> Result<(), ZError> {
let cap_id = 100;
let cap = Cap { cap: cap_id };
let mut buf = ByteBuffer::<1024>::new();
let mut caps = Vec::new();
cap.serialize(&mut buf, 0, &mut caps)?;
assert!(caps.len() == 1);
assert!(caps[0] == cap_id);
let parsed = Cap::parse(&buf, 0, &caps)?;
assert!(parsed == cap);
Ok(())
}
#[test]
fn repeated_serialization() -> Result<(), ZError> {
let rep = Repeated {
unsigned_ints: vec![0, 1, 3],
};
let mut buf = ByteBuffer::<1024>::new();
let mut caps = Vec::new();
rep.serialize(&mut buf, 0, &mut caps)?;
let parsed = Repeated::parse(&buf, 0, &caps)?;
assert!(parsed == rep);
Ok(())
}
#[test]
fn nested_serialization() -> Result<(), ZError> {
let nested = Nested {
basic: Basic {
unsigned_int: 82,
signed_int: -1234,
strn: "abc".to_string(),
},
cap1: Cap { cap: 37 },
cap2: Cap { cap: 39 },
};
let mut buf = ByteBuffer::<1024>::new();
let mut caps = Vec::new();
nested.serialize(&mut buf, 0, &mut caps)?;
let parsed = Nested::parse(&buf, 0, &caps)?;
assert!(parsed == nested);
Ok(())
}
#[test]
fn repeated_nested_serialization() -> Result<(), ZError> {
let nested = RepeatedNested {
basics: vec![
Basic {
unsigned_int: 82,
signed_int: -1234,
strn: "abc".to_string(),
},
Basic {
unsigned_int: 21,
signed_int: -8,
strn: "def".to_string(),
},
],
caps: vec![Cap { cap: 123 }, Cap { cap: 12343 }],
};
let mut buf = ByteBuffer::<1024>::new();
let mut caps = Vec::new();
nested.serialize(&mut buf, 0, &mut caps)?;
let parsed = RepeatedNested::parse(&buf, 0, &caps)?;
assert!(parsed == nested);
Ok(())
}
}

View file

@ -1,11 +0,0 @@
[package]
name = "yunq"
version = "0.1.0"
edition = "2021"
[dependencies]
mammoth = {path = "../mammoth", default-features = false}
[features]
default = ["hosted"]
hosted = ["mammoth/hosted"]

View file

@ -1,67 +0,0 @@
use alloc::boxed::Box;
use mammoth::zion::ZError;
pub struct ByteBuffer<const N: usize> {
buffer: Box<[u8; N]>,
}
impl<const N: usize> Default for ByteBuffer<N> {
fn default() -> Self {
Self {
buffer: Box::new([0; N]),
}
}
}
impl<const N: usize> ByteBuffer<N> {
pub fn new() -> Self {
ByteBuffer::default()
}
pub fn size(&self) -> u64 {
N as u64
}
pub fn slice(&self, len: usize) -> &[u8] {
&self.buffer[..len]
}
pub fn mut_slice(&mut self) -> &mut [u8] {
&mut self.buffer[..]
}
pub fn write_at<T>(&mut self, offset: usize, obj: T) -> Result<(), ZError> {
if (size_of::<T>() + offset) > N {
return Err(ZError::BUFFER_SIZE);
}
unsafe {
*(self.buffer[offset..].as_mut_ptr() as *mut T) = obj;
}
Ok(())
}
pub fn write_str_at(&mut self, offset: usize, s: &str) -> Result<(), ZError> {
if (s.len() + offset) > N {
return Err(ZError::BUFFER_SIZE);
}
for i in 0..s.len() {
self.buffer[offset + i] = s.as_bytes()[i];
}
Ok(())
}
pub fn at<T: Copy>(&self, offset: usize) -> Result<T, ZError> {
if (size_of::<T>() + offset) > N {
return Err(ZError::BUFFER_SIZE);
}
unsafe { Ok(*(self.buffer[offset..].as_ptr() as *const T)) }
}
pub fn str_at(&self, offset: usize, len: usize) -> Result<&str, ZError> {
if (len + offset) > N {
return Err(ZError::BUFFER_SIZE);
}
alloc::str::from_utf8(&self.buffer[offset..offset + len])
.map_err(|_| ZError::INVALID_ARGUMENT)
}
}
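A quick round-trip sketch (hypothetical, not part of this change set) exercising the ByteBuffer accessors:
fn buffer_round_trip() -> Result<(), mammoth::zion::ZError> {
    let mut buf = yunq::ByteBuffer::<64>::new();
    buf.write_at(0, 0xDEAD_BEEFu32)?;
    buf.write_str_at(4, "acadia")?;
    assert!(buf.at::<u32>(0)? == 0xDEAD_BEEF);
    assert!(buf.str_at(4, 6)? == "acadia");
    Ok(())
}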

View file

@ -1,38 +0,0 @@
use crate::buffer::ByteBuffer;
use crate::message::YunqMessage;
use alloc::vec::Vec;
use mammoth::cap::Capability;
use mammoth::zion::ZError;
pub fn call_endpoint<Req: YunqMessage, Resp: YunqMessage, const N: usize>(
request_id: u64,
req: &Req,
byte_buffer: &mut ByteBuffer<N>,
endpoint_cap: &Capability,
) -> Result<Resp, ZError> {
let mut cap_buffer = Vec::new();
let length = req.serialize_as_request(request_id, byte_buffer, &mut cap_buffer)?;
let reply_port_cap = mammoth::syscall::endpoint_send(
endpoint_cap,
byte_buffer.slice(16 + length),
cap_buffer.as_slice(),
)?;
// FIXME: Add a way to zero out the byte buffer.
cap_buffer = vec![0; 10];
mammoth::syscall::reply_port_recv(
reply_port_cap,
byte_buffer.mut_slice(),
cap_buffer.as_mut_slice(),
)?;
let resp_code: u64 = byte_buffer.at(8)?;
if resp_code != 0 {
return Err(ZError::from(resp_code));
}
Resp::parse_from_request(byte_buffer, &cap_buffer)
}

View file

@ -1,12 +0,0 @@
#![no_std]
#[macro_use]
extern crate alloc;
mod buffer;
pub mod client;
pub mod message;
pub mod server;
pub use buffer::ByteBuffer;
pub use message::YunqMessage;

View file

@ -1,144 +0,0 @@
use crate::buffer::ByteBuffer;
use alloc::vec::Vec;
use mammoth::zion::z_cap_t;
use mammoth::zion::ZError;
pub const MESSAGE_IDENT: u32 = 0x33441122;
pub const MESSAGE_HEADER_SIZE: usize = 24; // 4x uint32, 1x uint64
const SENTINEL: u32 = 0xBEEFDEAD;
const SERIALIZE_HEADER_SIZE: u32 = 0x10;
pub fn field_offset(offset: usize, field_index: usize) -> usize {
offset + MESSAGE_HEADER_SIZE + (8 * field_index)
}
pub fn parse_repeated<T: Copy, const N: usize>(
buf: &ByteBuffer<N>,
offset: usize,
len: usize,
) -> Result<Vec<T>, ZError> {
let mut repeated = Vec::new();
for i in 0..len {
repeated.push(buf.at::<T>(offset + (i * size_of::<T>()))?);
}
Ok(repeated)
}
pub fn parse_repeated_message<T: YunqMessage, const N: usize>(
buf: &ByteBuffer<N>,
mut offset: usize,
len: usize,
caps: &[z_cap_t],
) -> Result<Vec<T>, ZError> {
let mut repeated = Vec::new();
for _ in 0..len {
// FIXME: This is a bad way to get the length.
let msg_len = buf.at::<u32>(offset + 8)? as usize;
repeated.push(T::parse(buf, offset, caps)?);
offset += msg_len;
}
Ok(repeated)
}
pub fn serialize_repeated<T: Copy, const N: usize>(
buf: &mut ByteBuffer<N>,
offset: usize,
data: &[T],
) -> Result<usize, ZError> {
for (i, val) in data.iter().enumerate() {
buf.write_at(offset + (i * size_of::<T>()), val)?;
}
Ok(offset + size_of_val(data))
}
pub fn serialize_repeated_message<T: YunqMessage, const N: usize>(
buf: &mut ByteBuffer<N>,
mut offset: usize,
data: &[T],
caps: &mut Vec<z_cap_t>,
) -> Result<usize, ZError> {
for item in data {
offset += item.serialize(buf, offset, caps)?;
}
Ok(offset)
}
pub fn serialize_error<const N: usize>(buf: &mut ByteBuffer<N>, err: ZError) {
buf.write_at(0, SENTINEL)
.expect("Failed to serialize SENTINEL");
buf.write_at(4, SERIALIZE_HEADER_SIZE)
.expect("Failed to serialize size");
buf.write_at(8, err as u64)
.expect("Failed to serialize error");
}
pub trait YunqMessage {
fn parse<const N: usize>(
buf: &ByteBuffer<N>,
offset: usize,
caps: &[z_cap_t],
) -> Result<Self, ZError>
where
Self: Sized;
fn parse_from_request<const N: usize>(
buf: &ByteBuffer<N>,
caps: &[z_cap_t],
) -> Result<Self, ZError>
where
Self: Sized,
{
if buf.at::<u32>(0)? != SENTINEL {
return Err(ZError::INVALID_RESPONSE);
}
Self::parse(buf, 16, caps)
}
fn serialize<const N: usize>(
&self,
buf: &mut ByteBuffer<N>,
offset: usize,
caps: &mut Vec<z_cap_t>,
) -> Result<usize, ZError>;
fn serialize_as_request<const N: usize>(
&self,
request_id: u64,
buf: &mut ByteBuffer<N>,
caps: &mut Vec<z_cap_t>,
) -> Result<usize, ZError> {
buf.write_at(0, SENTINEL)?;
buf.write_at(8, request_id)?;
let length = self.serialize(buf, 16, caps)?;
buf.write_at(4, (16 + length) as u32)?;
Ok(length + 16)
}
}
pub struct Empty {}
impl YunqMessage for Empty {
fn parse<const N: usize>(
_buf: &ByteBuffer<N>,
_offset: usize,
_caps: &[z_cap_t],
) -> Result<Self, ZError>
where
Self: Sized,
{
Ok(Self {})
}
fn serialize<const N: usize>(
&self,
_buf: &mut ByteBuffer<N>,
_offset: usize,
_caps: &mut Vec<z_cap_t>,
) -> Result<usize, ZError> {
Ok(0)
}
}
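For orientation, the wire layout implied by serialize_as_request and field_offset (informational note derived from the code above):
// offset 0   u32  SENTINEL (0xBEEFDEAD)
// offset 4   u32  total length, including this 16-byte prefix
// offset 8   u64  request id on the request path, response code on the reply path
// offset 16  ...  the serialized message: a MESSAGE_HEADER_SIZE (24-byte) header,
//                 then one 8-byte slot per field (see field_offset).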

View file

@ -1,144 +0,0 @@
use core::future::Future;
use crate::buffer::ByteBuffer;
use alloc::sync::Arc;
use alloc::vec::Vec;
use mammoth::cap::Capability;
use mammoth::syscall;
use mammoth::task::Spawner;
use mammoth::task::Task;
use mammoth::thread;
use mammoth::thread::JoinHandle;
use mammoth::zion::z_cap_t;
use mammoth::zion::ZError;
pub trait YunqServer {
fn server_loop(&mut self) {
loop {
let mut byte_buffer = ByteBuffer::<1024>::new();
let mut cap_buffer = vec![0; 10];
let (_, _, reply_port_cap) = syscall::endpoint_recv(
self.endpoint_cap(),
byte_buffer.mut_slice(),
&mut cap_buffer,
)
.expect("Failed to call endpoint recv");
let method = byte_buffer
.at::<u64>(8)
.expect("Failed to access request length.");
let resp = self.handle_request(method, &mut byte_buffer, &mut cap_buffer);
match resp {
Ok(resp_len) => syscall::reply_port_send(
reply_port_cap,
byte_buffer.slice(resp_len),
&cap_buffer,
)
.expect("Failed to reply"),
Err(err) => {
crate::message::serialize_error(&mut byte_buffer, err);
syscall::reply_port_send(reply_port_cap, byte_buffer.slice(0x10), &[])
.expect("Failed to reply w/ error")
}
}
}
}
fn endpoint_cap(&self) -> &Capability;
fn create_client_cap(&self) -> Result<Capability, ZError> {
self.endpoint_cap()
.duplicate(!mammoth::zion::kZionPerm_Read)
}
fn handle_request(
&mut self,
method_number: u64,
byte_buffer: &mut ByteBuffer<1024>,
cap_buffer: &mut Vec<z_cap_t>,
) -> Result<usize, ZError>;
}
pub fn spawn_server_thread<T>(mut server: T) -> JoinHandle
where
T: YunqServer + Send + 'static,
{
thread::spawn(move || server.server_loop())
}
pub trait AsyncYunqServer
where
Self: Send + Sync + 'static,
{
fn server_loop(self: Arc<Self>, spawner: Spawner) {
loop {
let mut byte_buffer = ByteBuffer::<1024>::new();
let mut cap_buffer = vec![0; 10];
let (_, _, reply_port_cap) = syscall::endpoint_recv(
self.endpoint_cap(),
byte_buffer.mut_slice(),
&mut cap_buffer,
)
.expect("Failed to call endpoint recv");
let method = byte_buffer
.at::<u64>(8)
.expect("Failed to access request length.");
let self_clone = self.clone();
spawner.spawn(Task::new(async move {
self_clone
.handle_request_and_response(method, byte_buffer, cap_buffer, reply_port_cap)
.await
}));
}
}
fn handle_request_and_response(
&self,
method: u64,
mut byte_buffer: ByteBuffer<1024>,
mut cap_buffer: Vec<u64>,
reply_port_cap: Capability,
) -> impl Future<Output = ()> + Sync + Send {
async move {
let resp = self
.handle_request(method, &mut byte_buffer, &mut cap_buffer)
.await;
match resp {
Ok(resp_len) => syscall::reply_port_send(
reply_port_cap,
byte_buffer.slice(resp_len),
&cap_buffer,
)
.expect("Failed to reply"),
Err(err) => {
crate::message::serialize_error(&mut byte_buffer, err);
syscall::reply_port_send(reply_port_cap, byte_buffer.slice(0x10), &[])
.expect("Failed to reply w/ error")
}
}
}
}
fn endpoint_cap(&self) -> &Capability;
fn create_client_cap(&self) -> Result<Capability, ZError> {
self.endpoint_cap()
.duplicate(!mammoth::zion::kZionPerm_Read)
}
fn handle_request(
&self,
method_number: u64,
byte_buffer: &mut ByteBuffer<1024>,
cap_buffer: &mut Vec<z_cap_t>,
) -> impl Future<Output = Result<usize, ZError>> + Sync + Send;
}
pub fn spawn_async_server_thread<T>(server: Arc<T>, spawner: Spawner) -> JoinHandle
where
T: AsyncYunqServer + Send + Sync + 'static,
{
thread::spawn(move || {
server.server_loop(spawner);
})
}
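A hand-rolled server sketch (hypothetical, not part of this change set) that answers every request with an empty success reply; generated yunq code normally implements handle_request for a concrete protocol. It assumes a no_std crate with extern crate alloc:
use alloc::vec::Vec;
use mammoth::cap::Capability;
use mammoth::zion::{z_cap_t, ZError};
use yunq::server::YunqServer;
use yunq::{ByteBuffer, YunqMessage};

struct NullServer {
    endpoint: Capability,
}

impl YunqServer for NullServer {
    fn endpoint_cap(&self) -> &Capability {
        &self.endpoint
    }

    fn handle_request(
        &mut self,
        _method: u64,
        buf: &mut ByteBuffer<1024>,
        caps: &mut Vec<z_cap_t>,
    ) -> Result<usize, ZError> {
        caps.clear();
        // Request id 0 doubles as a zero (success) response code at offset 8.
        yunq::message::Empty {}.serialize_as_request(0, buf, caps)
    }
}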

View file

@ -1,17 +0,0 @@
[package]
name = "denali"
version = "0.1.0"
edition = "2021"
[dependencies]
bitfield-struct = "0.8.0"
mammoth = { path = "../../lib/mammoth" }
pci = { path = "../../lib/pci" }
yunq = { path = "../../lib/yunq" }
yellowstone-yunq = { path = "../../lib/yellowstone" }
[[bin]]
name = "denali"
[build-dependencies]
yunqc = { path = "../../../yunq/rust" }

View file

@ -1,14 +0,0 @@
use std::fs;
fn main() {
let input_file = "denali.yunq";
println!("cargo::rerun-if-changed={input_file}");
let input = fs::read_to_string(input_file).expect("Failed to read input file");
let code = yunqc::codegen(&input).expect("Failed to generate yunq code.");
let out = std::env::var("OUT_DIR").unwrap() + "/yunq.rs";
fs::write(out, code).expect("Failed to write generated code.");
}

Some files were not shown because too many files have changed in this diff.