[Zion] Move Memory Mappings to a dedicated tree impl.
parent 3e9923f227
commit e668428d9d
7 changed files with 129 additions and 47 deletions
@@ -35,15 +35,15 @@ uint64_t AddressSpace::GetNextMemMapAddr(uint64_t size) {
   return addr;
 }
 
-void AddressSpace::MapInMemoryObject(
+glcr::ErrorCode AddressSpace::MapInMemoryObject(
     uint64_t vaddr, const glcr::RefPtr<MemoryObject>& mem_obj) {
-  memory_mappings_.Insert(vaddr, {.vaddr = vaddr, .mem_obj = mem_obj});
+  return mapping_tree_.AddInMemoryObject(vaddr, mem_obj);
 }
 
-uint64_t AddressSpace::MapInMemoryObject(
+glcr::ErrorOr<uint64_t> AddressSpace::MapInMemoryObject(
     const glcr::RefPtr<MemoryObject>& mem_obj) {
   uint64_t vaddr = GetNextMemMapAddr(mem_obj->size());
-  memory_mappings_.Insert(vaddr, {.vaddr = vaddr, .mem_obj = mem_obj});
+  RET_ERR(mapping_tree_.AddInMemoryObject(vaddr, mem_obj));
   return vaddr;
 }
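Note: lib/memory_mapping_tree.h itself is not shown on this page, so the interface can only be inferred from the call sites above and in the page-fault hunk below. As orientation, it would look roughly like the sketch here; everything beyond the two method names is an assumption, not the actual header.

// Sketch only -- inferred from how AddressSpace uses the tree in this commit.
class MemoryMappingTree {
 public:
  // Record that [vaddr, vaddr + mem_obj->size()) is backed by mem_obj.
  // Presumably fails if the range collides with an existing mapping.
  glcr::ErrorCode AddInMemoryObject(uint64_t vaddr,
                                    const glcr::RefPtr<MemoryObject>& mem_obj);

  // Resolve a virtual address to the physical page backing it, or an error
  // if no mapping covers vaddr (used by HandlePageFault below).
  glcr::ErrorOr<uint64_t> GetPhysicalPageAtVaddr(uint64_t vaddr);
};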
@@ -55,38 +55,23 @@ bool AddressSpace::HandlePageFault(uint64_t vaddr) {
 #if K_VMAS_DEBUG
   dbgln("[VMAS] Page Fault!");
 #endif
   if (vaddr < kPageSize) {
     // Invalid page access.
     return false;
   }
 
   if (user_stacks_.IsValidStack(vaddr)) {
     MapPage(cr3_, vaddr, phys_mem::AllocatePage());
     return true;
   }
 
-  auto mapping_or = GetMemoryMappingForAddr(vaddr);
-  if (!mapping_or) {
-    return false;
-  }
-  MemoryMapping& mapping = mapping_or.value();
-  uint64_t offset = vaddr - mapping.vaddr;
-  uint64_t physical_addr = mapping.mem_obj->PhysicalPageAtOffset(offset);
-  if (physical_addr == 0) {
-    dbgln("WARN: Memory object returned invalid physical addr.");
+  auto offset_or = mapping_tree_.GetPhysicalPageAtVaddr(vaddr);
+  if (!offset_or.ok()) {
     return false;
   }
 #if K_VMAS_DEBUG
   dbgln("[VMAS] Mapping P({x}) at V({x})", physical_addr, vaddr);
 #endif
-  MapPage(cr3_, vaddr, physical_addr);
+  MapPage(cr3_, vaddr, offset_or.value());
   return true;
 }
 
-glcr::Optional<glcr::Ref<AddressSpace::MemoryMapping>>
-AddressSpace::GetMemoryMappingForAddr(uint64_t vaddr) {
-  auto mapping_or = memory_mappings_.Predecessor(vaddr + 1);
-  if (!mapping_or) {
-    return mapping_or;
-  }
-  MemoryMapping& mapping = mapping_or.value();
-  if (mapping.vaddr + mapping.mem_obj->size() <= vaddr) {
-    return {};
-  }
-  return mapping_or;
-}
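The lookup that HandlePageFault previously did inline -- take the mapping with the greatest start address at or below the faulting address (memory_mappings_.Predecessor(vaddr + 1)), then check that the address actually falls inside that mapping's range -- is what the tree now owns. A minimal standalone illustration of that predecessor-plus-bounds-check pattern, using std::map in place of glcr::BinaryTree purely for demonstration:

#include <cstdint>
#include <map>
#include <optional>

struct Mapping {
  uint64_t vaddr;
  uint64_t size;  // stand-in for mem_obj->size()
};

// Find the mapping covering vaddr: the entry with the largest start
// address <= vaddr whose range [vaddr, vaddr + size) still contains it.
std::optional<Mapping> FindMapping(const std::map<uint64_t, Mapping>& mappings,
                                   uint64_t vaddr) {
  auto it = mappings.upper_bound(vaddr);  // first entry starting above vaddr
  if (it == mappings.begin()) {
    return std::nullopt;  // nothing starts at or below vaddr
  }
  --it;  // predecessor, i.e. greatest start address <= vaddr
  const Mapping& m = it->second;
  if (m.vaddr + m.size <= vaddr) {
    return std::nullopt;  // vaddr lies past the end of that mapping
  }
  return m;
}

int main() {
  std::map<uint64_t, Mapping> mappings;
  mappings[0x20'00000000] = {0x20'00000000, 0x4000};
  // Hit: inside the 16 KiB range. Miss: one byte past its end.
  bool hit = FindMapping(mappings, 0x20'00001000).has_value();
  bool miss = FindMapping(mappings, 0x20'00004000).has_value();
  return (hit && !miss) ? 0 : 1;
}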
@@ -5,6 +5,7 @@
 #include <stdint.h>
 
 #include "include/ztypes.h"
+#include "lib/memory_mapping_tree.h"
 #include "memory/user_stack_manager.h"
 #include "object/memory_object.h"
@@ -69,16 +70,17 @@ class AddressSpace : public KernelObject {
 
   // Maps in a memory object at a specific address.
   // Note this is unsafe for now as it may clobber other mappings.
-  void MapInMemoryObject(uint64_t vaddr,
-                         const glcr::RefPtr<MemoryObject>& mem_obj);
+  [[nodiscard]] glcr::ErrorCode MapInMemoryObject(
+      uint64_t vaddr, const glcr::RefPtr<MemoryObject>& mem_obj);
 
-  uint64_t MapInMemoryObject(const glcr::RefPtr<MemoryObject>& mem_obj);
+  [[nodiscard]] glcr::ErrorOr<uint64_t> MapInMemoryObject(
+      const glcr::RefPtr<MemoryObject>& mem_obj);
 
   // Kernel Mappings.
   uint64_t AllocateKernelStack();
 
   // Returns true if the page fault has been resolved.
-  bool HandlePageFault(uint64_t vaddr);
+  [[nodiscard]] bool HandlePageFault(uint64_t vaddr);
 
  private:
   friend class glcr::MakeRefCountedFriend<AddressSpace>;
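Because the new declarations are [[nodiscard]], call sites must consume the result rather than silently ignore a failed mapping. A hypothetical caller might look like the sketch below; the helper functions are illustrative and not part of this commit, and only the ok()/value() accessors are taken from the page-fault hunk above.

// Hypothetical call sites -- not part of this commit.
glcr::ErrorCode MapAtFixedAddress(AddressSpace& space, uint64_t vaddr,
                                  const glcr::RefPtr<MemoryObject>& mem_obj) {
  // The fixed-address overload now reports failure instead of returning void;
  // forwarding the ErrorCode keeps the [[nodiscard]] contract intact.
  return space.MapInMemoryObject(vaddr, mem_obj);
}

bool TryMapAnywhere(AddressSpace& space,
                    const glcr::RefPtr<MemoryObject>& mem_obj,
                    uint64_t* vaddr_out) {
  auto vaddr_or = space.MapInMemoryObject(mem_obj);
  if (!vaddr_or.ok()) {
    return false;  // dropping the ErrorOr on the floor would now warn
  }
  *vaddr_out = vaddr_or.value();
  return true;
}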
@@ -88,19 +90,5 @@ class AddressSpace : public KernelObject {
   UserStackManager user_stacks_;
   uint64_t next_memmap_addr_ = 0x20'00000000;
 
-  struct MemoryMapping {
-    uint64_t vaddr;
-    glcr::RefPtr<MemoryObject> mem_obj;
-  };
-
-  // TODO: Consider adding a red-black tree implementation here.
-  // As is this tree functions about as well as a linked list
-  // because mappings are likely to be added in near-perfect ascending order.
-  // Also worth considering creating a special tree implementation for
-  // just this purpose, or maybe a BinaryTree implementation that accepts
-  // ranges rather than a single key.
-  glcr::BinaryTree<uint64_t, MemoryMapping> memory_mappings_;
-
-  glcr::Optional<glcr::Ref<MemoryMapping>> GetMemoryMappingForAddr(
-      uint64_t vaddr);
+  MemoryMappingTree mapping_tree_;
 };