Create a MemoryObject class and use it to load programs.
This commit is contained in:
parent 1fda0f3fae
commit b06c76e477
9 changed files with 209 additions and 49 deletions
@@ -30,6 +30,41 @@ uint64_t AddressSpace::GetNextMemMapAddr(uint64_t size) {
  return addr;
}

void AddressSpace::MapInMemoryObject(uint64_t vaddr,
                                     const RefPtr<MemoryObject>& mem_obj) {
  memory_mappings_.PushBack({.vaddr = vaddr, .mem_obj = mem_obj});
}

uint64_t* AddressSpace::AllocateKernelStack() {
  return gKernelStackManager->AllocateKernelStack();
}

bool AddressSpace::HandlePageFault(uint64_t vaddr) {
  MemoryMapping* mapping = GetMemoryMappingForAddr(vaddr);
  if (mapping == nullptr) {
    return false;
  }
  uint64_t offset = vaddr - mapping->vaddr;
  uint64_t physical_addr = mapping->mem_obj->PhysicalPageAtOffset(offset);
  if (physical_addr == 0) {
    dbgln("WARN: Memory object returned invalid physical addr.");
    return false;
  }
  dbgln("Mapping P(%m) at V(%m)", physical_addr, vaddr);
  MapPage(cr3_, vaddr, physical_addr);
  return true;
}

AddressSpace::MemoryMapping* AddressSpace::GetMemoryMappingForAddr(
    uint64_t vaddr) {
  auto iter = memory_mappings_.begin();
  while (iter != memory_mappings_.end()) {
    if ((vaddr >= (*iter).vaddr) &&
        (vaddr < ((*iter).vaddr + (*iter).mem_obj->size()))) {
      return &(*iter);
    }
    ++iter;
  }

  return 0;
}
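
HandlePageFault only resolves faults that land inside a recorded mapping; the caller still decides what happens when it returns false. A minimal sketch of the calling side, assuming a page-fault exception handler that already has the faulting address (that handler is not part of the hunks shown here, and CurrentAddressSpace / KillCurrentProcess are placeholder names):

// Sketch only, not part of this commit.
void OnPageFault(uint64_t faulting_vaddr) {
  if (!CurrentAddressSpace().HandlePageFault(faulting_vaddr)) {
    // No MemoryObject covers this address (or it had no backing page),
    // so this is a real fault rather than a lazy mapping to fill in.
    KillCurrentProcess();
    return;
  }
  // On success the page tables now map faulting_vaddr, so returning from
  // the exception retries the faulting instruction.
}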
@@ -2,8 +2,9 @@

#include <stdint.h>

#include "debug/debug.h"
#include "lib/ref_ptr.h"
#include "memory/user_stack_manager.h"
#include "object/memory_object.h"

// VirtualMemory class holds a memory space for an individual process.
//

@@ -51,13 +52,28 @@ class AddressSpace {
  uint64_t AllocateUserStack();
  uint64_t GetNextMemMapAddr(uint64_t size);

  // Maps in a memory object at a specific address.
  // Note this is unsafe for now as it may clobber other mappings.
  void MapInMemoryObject(uint64_t vaddr, const RefPtr<MemoryObject>& mem_obj);

  // Kernel Mappings.
  uint64_t* AllocateKernelStack();

  // Returns true if the page fault has been resolved.
  bool HandlePageFault(uint64_t vaddr);

 private:
  AddressSpace(uint64_t cr3) : cr3_(cr3) {}
  uint64_t cr3_ = 0;

  UserStackManager user_stacks_;
  uint64_t next_memmap_addr_ = 0x20'00000000;

  struct MemoryMapping {
    uint64_t vaddr;
    RefPtr<MemoryObject> mem_obj;
  };
  LinkedList<MemoryMapping> memory_mappings_;

  MemoryMapping* GetMemoryMappingForAddr(uint64_t vaddr);
};
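
Because MapInMemoryObject does not check for overlapping regions yet, callers that do not need a fixed address can let GetNextMemMapAddr reserve a fresh range first. A small sketch of that pattern (placeholder names, not part of the commit):

// Sketch only: map an object at the next free memmap address.
uint64_t MapAnywhere(AddressSpace& space, const RefPtr<MemoryObject>& mem_obj) {
  // Reserve a region sized to the object so it cannot clobber an existing
  // mapping, then record the mapping itself.
  uint64_t vaddr = space.GetNextMemMapAddr(mem_obj->size());
  space.MapInMemoryObject(vaddr, mem_obj);
  return vaddr;
}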
zion/object/memory_object.cpp (new file, 63 lines)
@@ -0,0 +1,63 @@
#include "object/memory_object.h"

#include "boot/boot_info.h"
#include "debug/debug.h"
#include "memory/physical_memory.h"

MemoryObject::MemoryObject(uint64_t size) : size_(size) {
  if ((size & 0xFFF) != 0) {
    size_ = (size & ~0xFFF) + 0x1000;
    dbgln("MemoryObject: aligned %x to %x", size, size_);
  }
  // FIXME: Do this lazily.
  uint64_t num_pages = size_ / 0x1000;
  for (uint64_t i = 0; i < num_pages; i++) {
    phys_page_list_.PushBack(0);
  }
}

uint64_t MemoryObject::PhysicalPageAtOffset(uint64_t offset) {
  if (offset > size_) {
    panic("Invalid offset");
  }
  uint64_t page_num = offset / 0x1000;
  return PageNumberToPhysAddr(page_num);
}

void MemoryObject::CopyBytesToObject(uint64_t source, uint64_t length) {
  if (length > size_) {
    panic("Copy overruns memory object: %x too large for %x", length, size_);
  }
  uint64_t hhdm = boot::GetHigherHalfDirectMap();
  uint64_t page_number = 0;
  while (length > 0) {
    uint64_t physical = hhdm + PageNumberToPhysAddr(page_number);

    uint64_t bytes_to_copy = length >= 0x1000 ? 0x1000 : length;

    uint8_t* srcptr = reinterpret_cast<uint8_t*>(source);
    uint8_t* destptr = reinterpret_cast<uint8_t*>(physical);
    for (uint64_t i = 0; i < bytes_to_copy; i++) {
      destptr[i] = srcptr[i];
    }

    length -= bytes_to_copy;
    source += 0x1000;
    page_number++;
  }
}

uint64_t MemoryObject::PageNumberToPhysAddr(uint64_t page_num) {
  auto iter = phys_page_list_.begin();
  uint64_t index = 0;
  while (index < page_num) {
    ++iter;
    index++;
  }

  if (*iter == 0) {
    dbgln("Allocating page num %u for mem object", page_num);
    *iter = phys_mem::AllocatePage();
  }
  return *iter;
}
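
The constructor rounds the requested size up to a page boundary and reserves one list slot per page, but physical frames are only grabbed the first time a page is reached through PageNumberToPhysAddr. A small illustration of that behavior (sketch only; assumes a MemoryObject can be constructed directly here for illustration):

// 0x2500 bytes rounds up to 0x3000, i.e. three pages, none backed yet.
MemoryObject obj(0x2500);
// Touching offset 0x1200 hits page 1; PageNumberToPhysAddr sees a zero
// entry and calls phys_mem::AllocatePage(), so 'phys' is the frame that
// now backs bytes 0x1000 through 0x1FFF of the object.
uint64_t phys = obj.PhysicalPageAtOffset(0x1200);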
zion/object/memory_object.h (new file, 30 lines)
@@ -0,0 +1,30 @@
#pragma once

#include "lib/linked_list.h"
#include "object/kernel_object.h"

/*
 * MemoryObject is a page-aligned set of memory that corresponds
 * to physical pages.
 *
 * It can be mapped in to one or more address spaces.
 * */
class MemoryObject : public KernelObject {
 public:
  MemoryObject(uint64_t size);

  uint64_t size() { return size_; }
  uint64_t num_pages() { return size_ / 0x1000; }

  uint64_t PhysicalPageAtOffset(uint64_t offset);

  void CopyBytesToObject(uint64_t source, uint64_t length);

 private:
  // Always stores the full page-aligned size.
  uint64_t size_;

  uint64_t PageNumberToPhysAddr(uint64_t page_num);

  LinkedList<uint64_t> phys_page_list_;
};
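
The commit message says programs are now loaded through a MemoryObject; the loader itself is in parts of the commit not captured above, so the following is only a sketch of how the pieces fit together (space, seg_vaddr, seg_src, and seg_size are placeholders for a segment already copied into kernel-visible memory):

// Sketch only, not part of this commit.
void LoadSegment(AddressSpace& space, const RefPtr<MemoryObject>& mem_obj,
                 uint64_t seg_vaddr, uint64_t seg_src, uint64_t seg_size) {
  // Copy the raw segment bytes into the object's pages via the HHDM;
  // frames are allocated on demand as each page is first written.
  mem_obj->CopyBytesToObject(seg_src, seg_size);
  // Record the mapping; nothing is written to the page tables yet.
  space.MapInMemoryObject(seg_vaddr, mem_obj);
  // The first user access to seg_vaddr faults, and HandlePageFault then
  // maps the backing frame for that page.
}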