[Zion] Move Memory Mappings to a dedicated tree impl.

This commit is contained in:
Drew Galbraith 2023-11-19 18:45:13 -08:00
parent 3e9923f227
commit e668428d9d
7 changed files with 129 additions and 47 deletions

View file

@ -0,0 +1,57 @@
#include "lib/memory_mapping_tree.h"
#include "debug/debug.h"
// Inserts `object` into the tree at [vaddr, vaddr + object->size()).
// Returns ALREADY_EXISTS if that range overlaps any existing mapping
// (on either side, or at exactly this base address); OK otherwise.
// TODO: This implementation is inefficient as it traverses the tree a lot, we
// should have some solution with iterators to avoid this.
glcr::ErrorCode MemoryMappingTree::AddInMemoryObject(
    uint64_t vaddr, const glcr::RefPtr<MemoryObject>& object) {
  const uint64_t new_limit = vaddr + object->size();

  // A mapping already starts at exactly this address.
  if (mapping_tree_.Find(vaddr)) {
    return glcr::ALREADY_EXISTS;
  }

  // The nearest mapping below must end at or before our base.
  auto prev_or = mapping_tree_.Predecessor(vaddr);
  if (prev_or && prev_or.value().get().vaddr_limit > vaddr) {
    return glcr::ALREADY_EXISTS;
  }

  // The nearest mapping above must start at or after our limit.
  auto next_or = mapping_tree_.Successor(vaddr);
  if (next_or && next_or.value().get().vaddr_base < new_limit) {
    return glcr::ALREADY_EXISTS;
  }

  mapping_tree_.Insert(vaddr, MemoryMapping{
                                  .vaddr_base = vaddr,
                                  .vaddr_limit = new_limit,
                                  .mem_object = object,
                              });
  return glcr::OK;
}
// Frees mappings in the range [vaddr_base, vaddr_limit).
// Currently a stub: it only logs and reports OK without unmapping anything.
//
// Fix: the original definition was missing the `MemoryMappingTree::`
// qualifier, so it defined an unrelated free function and left the member
// declared in the header undefined (a linker error for any caller).
glcr::ErrorCode MemoryMappingTree::FreeMemoryRange(uint64_t vaddr_base,
                                                   uint64_t vaddr_limit) {
  // TODO: actually remove the overlapping mappings from mapping_tree_.
  dbgln("Unhandled free memory range!");
  return glcr::OK;
}
// Resolves `vaddr` to a physical page by locating the mapping that covers
// it and querying the backing memory object at the in-mapping offset.
// Returns NOT_FOUND if no mapping contains `vaddr`.
glcr::ErrorOr<uint64_t> MemoryMappingTree::GetPhysicalPageAtVaddr(
    uint64_t vaddr) {
  auto lookup = GetMemoryMappingForAddr(vaddr);
  if (!lookup) {
    return glcr::NOT_FOUND;
  }
  MemoryMapping& region = lookup.value();
  const uint64_t offset = vaddr - region.vaddr_base;
  return region.mem_object->PhysicalPageAtOffset(offset);
}
// Returns the mapping whose range [vaddr_base, vaddr_limit) contains
// `vaddr`, or an empty optional if no mapping covers that address.
glcr::Optional<glcr::Ref<MemoryMappingTree::MemoryMapping>>
MemoryMappingTree::GetMemoryMappingForAddr(uint64_t vaddr) {
  // Query Predecessor at vaddr + 1 so a mapping based exactly at vaddr is
  // also found.
  // NOTE(review): vaddr == UINT64_MAX would wrap vaddr + 1 to 0 — confirm
  // callers never pass the top of the address space.
  auto mapping_or = mapping_tree_.Predecessor(vaddr + 1);
  if (!mapping_or) {
    return mapping_or;
  }
  MemoryMapping& mapping = mapping_or.value();
  // Fix: use the stored vaddr_limit instead of recomputing
  // vaddr_base + mem_object->size(), keeping the containment check
  // consistent with the invariant maintained by AddInMemoryObject.
  if (mapping.vaddr_limit <= vaddr) {
    return {};
  }
  return mapping_or;
}

View file

@ -0,0 +1,41 @@
#pragma once
#include <glacier/container/binary_tree.h>
#include "object/memory_object.h"
/* MemoryMappingTree stores memory objects referred to by
 * virtual-address ranges and ensures those ranges do not overlap.
 */
class MemoryMappingTree {
public:
MemoryMappingTree() = default;
// Non-copyable and non-movable.
MemoryMappingTree(const MemoryMappingTree&) = delete;
MemoryMappingTree(MemoryMappingTree&&) = delete;
// Maps `object` at [vaddr, vaddr + object->size()).
// Returns ALREADY_EXISTS if the range overlaps an existing mapping.
glcr::ErrorCode AddInMemoryObject(uint64_t vaddr,
const glcr::RefPtr<MemoryObject>& object);
// Frees mappings within [vaddr_base, vaddr_limit).
// NOTE(review): the current implementation is a stub that only logs.
glcr::ErrorCode FreeMemoryRange(uint64_t vaddr_base, uint64_t vaddr_limit);
// Returns the physical page backing `vaddr`, or NOT_FOUND if unmapped.
glcr::ErrorOr<uint64_t> GetPhysicalPageAtVaddr(uint64_t vaddr);
private:
// One mapped region: [vaddr_base, vaddr_limit) backed by mem_object.
struct MemoryMapping {
uint64_t vaddr_base;
uint64_t vaddr_limit;
glcr::RefPtr<MemoryObject> mem_object;
};
// TODO: Consider adding a red-black tree implementation here.
// As is this tree functions about as well as a linked list
// because mappings are likely to be added in near-perfect ascending order.
// Also worth considering creating a special tree implementation for
// just this purpose, or maybe a BinaryTree implementation that accepts
// ranges rather than a single key.
glcr::BinaryTree<uint64_t, MemoryMapping> mapping_tree_;
// Returns the mapping containing `vaddr`, if any.
glcr::Optional<glcr::Ref<MemoryMapping>> GetMemoryMappingForAddr(
uint64_t vaddr);
};